Diffstat (limited to 'drivers/mtd')
-rw-r--r--  drivers/mtd/chips/cfi_cmdset_0001.c  | 26
-rw-r--r--  drivers/mtd/chips/jedec_probe.c      | 13
-rw-r--r--  drivers/mtd/cmdlinepart.c            | 2
-rw-r--r--  drivers/mtd/devices/m25p80.c         | 8
-rw-r--r--  drivers/mtd/inftlcore.c              | 11
-rw-r--r--  drivers/mtd/maps/Kconfig             | 20
-rw-r--r--  drivers/mtd/maps/Makefile            | 2
-rw-r--r--  drivers/mtd/maps/bfin-async-flash.c  | 5
-rw-r--r--  drivers/mtd/maps/integrator-flash.c  | 232
-rw-r--r--  drivers/mtd/maps/physmap.c           | 40
-rw-r--r--  drivers/mtd/maps/physmap_of.c        | 199
-rw-r--r--  drivers/mtd/maps/pmcmsp-ramroot.c    | 104
-rw-r--r--  drivers/mtd/maps/pxa2xx-flash.c      | 22
-rw-r--r--  drivers/mtd/maps/rbtx4939-flash.c    | 23
-rw-r--r--  drivers/mtd/maps/sa1100-flash.c      | 23
-rw-r--r--  drivers/mtd/maps/sbc8240.c           | 250
-rw-r--r--  drivers/mtd/maps/uclinux.c           | 16
-rw-r--r--  drivers/mtd/mtd_blkdevs.c            | 8
-rw-r--r--  drivers/mtd/mtdblock.c               | 16
-rw-r--r--  drivers/mtd/mtdchar.c                | 303
-rw-r--r--  drivers/mtd/mtdcore.c                | 48
-rw-r--r--  drivers/mtd/mtdpart.c                | 20
-rw-r--r--  drivers/mtd/nand/Kconfig             | 24
-rw-r--r--  drivers/mtd/nand/Makefile            | 1
-rw-r--r--  drivers/mtd/nand/atmel_nand.c        | 13
-rw-r--r--  drivers/mtd/nand/bf5xx_nand.c        | 17
-rw-r--r--  drivers/mtd/nand/davinci_nand.c      | 342
-rw-r--r--  drivers/mtd/nand/mxc_nand.c          | 66
-rw-r--r--  drivers/mtd/nand/nand_base.c         | 3
-rw-r--r--  drivers/mtd/nand/nand_ecc.c          | 4
-rw-r--r--  drivers/mtd/nand/omap2.c             | 779
-rw-r--r--  drivers/mtd/nand/orion_nand.c        | 23
-rw-r--r--  drivers/mtd/nand/plat_nand.c         | 19
-rw-r--r--  drivers/mtd/nand/s3c2410.c           | 268
-rw-r--r--  drivers/mtd/nand/txx9ndfmc.c         | 16
-rw-r--r--  drivers/mtd/nftlcore.c               | 31
-rw-r--r--  drivers/mtd/onenand/omap2.c          | 7
-rw-r--r--  drivers/mtd/onenand/onenand_base.c   | 862
-rw-r--r--  drivers/mtd/onenand/onenand_bbt.c    | 14
-rw-r--r--  drivers/mtd/onenand/onenand_sim.c    | 81
-rw-r--r--  drivers/mtd/ubi/Kconfig              | 13
-rw-r--r--  drivers/mtd/ubi/Makefile             | 2
-rw-r--r--  drivers/mtd/ubi/build.c              | 167
-rw-r--r--  drivers/mtd/ubi/cdev.c               | 32
-rw-r--r--  drivers/mtd/ubi/debug.c              | 2
-rw-r--r--  drivers/mtd/ubi/debug.h              | 7
-rw-r--r--  drivers/mtd/ubi/eba.c                | 100
-rw-r--r--  drivers/mtd/ubi/gluebi.c             | 379
-rw-r--r--  drivers/mtd/ubi/io.c                 | 93
-rw-r--r--  drivers/mtd/ubi/kapi.c               | 117
-rw-r--r--  drivers/mtd/ubi/scan.c               | 25
-rw-r--r--  drivers/mtd/ubi/scan.h               | 2
-rw-r--r--  drivers/mtd/ubi/ubi-media.h          | 12
-rw-r--r--  drivers/mtd/ubi/ubi.h                | 90
-rw-r--r--  drivers/mtd/ubi/upd.c                | 8
-rw-r--r--  drivers/mtd/ubi/vmt.c                | 65
-rw-r--r--  drivers/mtd/ubi/wl.c                 | 187
57 files changed, 3896 insertions, 1366 deletions
diff --git a/drivers/mtd/chips/cfi_cmdset_0001.c b/drivers/mtd/chips/cfi_cmdset_0001.c
index c240454fd11..8664feebc93 100644
--- a/drivers/mtd/chips/cfi_cmdset_0001.c
+++ b/drivers/mtd/chips/cfi_cmdset_0001.c
@@ -46,6 +46,7 @@
#define MANUFACTURER_INTEL 0x0089
#define I82802AB 0x00ad
#define I82802AC 0x00ac
+#define PF38F4476 0x881c
#define MANUFACTURER_ST 0x0020
#define M50LPW080 0x002F
#define M50FLW080A 0x0080
@@ -315,10 +316,20 @@ static struct cfi_fixup fixup_table[] = {
{ 0, 0, NULL, NULL }
};
+static void cfi_fixup_major_minor(struct cfi_private *cfi,
+ struct cfi_pri_intelext *extp)
+{
+ if (cfi->mfr == MANUFACTURER_INTEL &&
+ cfi->id == PF38F4476 && extp->MinorVersion == '3')
+ extp->MinorVersion = '1';
+}
+
static inline struct cfi_pri_intelext *
read_pri_intelext(struct map_info *map, __u16 adr)
{
+ struct cfi_private *cfi = map->fldrv_priv;
struct cfi_pri_intelext *extp;
+ unsigned int extra_size = 0;
unsigned int extp_size = sizeof(*extp);
again:
@@ -326,6 +337,8 @@ read_pri_intelext(struct map_info *map, __u16 adr)
if (!extp)
return NULL;
+ cfi_fixup_major_minor(cfi, extp);
+
if (extp->MajorVersion != '1' ||
(extp->MinorVersion < '0' || extp->MinorVersion > '5')) {
printk(KERN_ERR " Unknown Intel/Sharp Extended Query "
@@ -340,19 +353,24 @@ read_pri_intelext(struct map_info *map, __u16 adr)
extp->BlkStatusRegMask = le16_to_cpu(extp->BlkStatusRegMask);
extp->ProtRegAddr = le16_to_cpu(extp->ProtRegAddr);
- if (extp->MajorVersion == '1' && extp->MinorVersion >= '3') {
- unsigned int extra_size = 0;
- int nb_parts, i;
+ if (extp->MinorVersion >= '0') {
+ extra_size = 0;
/* Protection Register info */
extra_size += (extp->NumProtectionFields - 1) *
sizeof(struct cfi_intelext_otpinfo);
+ }
+ if (extp->MinorVersion >= '1') {
/* Burst Read info */
extra_size += 2;
if (extp_size < sizeof(*extp) + extra_size)
goto need_more;
- extra_size += extp->extra[extra_size-1];
+ extra_size += extp->extra[extra_size - 1];
+ }
+
+ if (extp->MinorVersion >= '3') {
+ int nb_parts, i;
/* Number of hardware-partitions */
extra_size += 1;
diff --git a/drivers/mtd/chips/jedec_probe.c b/drivers/mtd/chips/jedec_probe.c
index e824b9b9b05..ccc4cfc7e4b 100644
--- a/drivers/mtd/chips/jedec_probe.c
+++ b/drivers/mtd/chips/jedec_probe.c
@@ -166,6 +166,7 @@
#define SST39LF040 0x00D7
#define SST39SF010A 0x00B5
#define SST39SF020A 0x00B6
+#define SST39SF040 0x00B7
#define SST49LF004B 0x0060
#define SST49LF040B 0x0050
#define SST49LF008A 0x005a
@@ -1393,6 +1394,18 @@ static const struct amd_flash_info jedec_table[] = {
}
}, {
.mfr_id = MANUFACTURER_SST,
+ .dev_id = SST39SF040,
+ .name = "SST 39SF040",
+ .devtypes = CFI_DEVICETYPE_X8,
+ .uaddr = MTD_UADDR_0x5555_0x2AAA,
+ .dev_size = SIZE_512KiB,
+ .cmd_set = P_ID_AMD_STD,
+ .nr_regions = 1,
+ .regions = {
+ ERASEINFO(0x01000,128),
+ }
+ }, {
+ .mfr_id = MANUFACTURER_SST,
.dev_id = SST49LF040B,
.name = "SST 49LF040B",
.devtypes = CFI_DEVICETYPE_X8,
diff --git a/drivers/mtd/cmdlinepart.c b/drivers/mtd/cmdlinepart.c
index 5011fa73f91..1479da6d3aa 100644
--- a/drivers/mtd/cmdlinepart.c
+++ b/drivers/mtd/cmdlinepart.c
@@ -194,7 +194,7 @@ static struct mtd_partition * newpart(char *s,
parts[this_part].name = extra_mem;
extra_mem += name_len + 1;
- dbg(("partition %d: name <%s>, offset %x, size %x, mask flags %x\n",
+ dbg(("partition %d: name <%s>, offset %llx, size %llx, mask flags %x\n",
this_part,
parts[this_part].name,
parts[this_part].offset,
diff --git a/drivers/mtd/devices/m25p80.c b/drivers/mtd/devices/m25p80.c
index cc6369ea67d..10ed195c0c1 100644
--- a/drivers/mtd/devices/m25p80.c
+++ b/drivers/mtd/devices/m25p80.c
@@ -54,7 +54,7 @@
#define SR_SRWD 0x80 /* SR write protect */
/* Define max times to check status register before we give up. */
-#define MAX_READY_WAIT_JIFFIES (10 * HZ) /* eg. M25P128 specs 6s max sector erase */
+#define MAX_READY_WAIT_JIFFIES (40 * HZ) /* M25P16 specs 40s max chip erase */
#define CMD_SIZE 4
#ifdef CONFIG_M25PXX_USE_FAST_READ
@@ -500,6 +500,9 @@ static struct flash_info __devinitdata m25p_data [] = {
{ "at26df161a", 0x1f4601, 0, 64 * 1024, 32, SECT_4K, },
{ "at26df321", 0x1f4701, 0, 64 * 1024, 64, SECT_4K, },
+ /* Macronix */
+ { "mx25l12805d", 0xc22018, 0, 64 * 1024, 256, },
+
/* Spansion -- single (large) sector size only, at least
* for the chips listed here (without boot sectors).
*/
@@ -528,6 +531,7 @@ static struct flash_info __devinitdata m25p_data [] = {
{ "m25p64", 0x202017, 0, 64 * 1024, 128, },
{ "m25p128", 0x202018, 0, 256 * 1024, 64, },
+ { "m45pe10", 0x204011, 0, 64 * 1024, 2, },
{ "m45pe80", 0x204014, 0, 64 * 1024, 16, },
{ "m45pe16", 0x204015, 0, 64 * 1024, 32, },
@@ -732,7 +736,7 @@ static int __devinit m25p_probe(struct spi_device *spi)
flash->partitioned = 1;
return add_mtd_partitions(&flash->mtd, parts, nr_parts);
}
- } else if (data->nr_parts)
+ } else if (data && data->nr_parts)
dev_warn(&spi->dev, "ignoring %d default partitions on %s\n",
data->nr_parts, data->name);
diff --git a/drivers/mtd/inftlcore.c b/drivers/mtd/inftlcore.c
index 73f05227dc8..d8cf29c01cc 100644
--- a/drivers/mtd/inftlcore.c
+++ b/drivers/mtd/inftlcore.c
@@ -226,7 +226,7 @@ static u16 INFTL_findfreeblock(struct INFTLrecord *inftl, int desperate)
if (!desperate && inftl->numfreeEUNs < 2) {
DEBUG(MTD_DEBUG_LEVEL1, "INFTL: there are too few free "
"EUNs (%d)\n", inftl->numfreeEUNs);
- return 0xffff;
+ return BLOCK_NIL;
}
/* Scan for a free block */
@@ -281,7 +281,8 @@ static u16 INFTL_foldchain(struct INFTLrecord *inftl, unsigned thisVUC, unsigned
silly = MAX_LOOPS;
while (thisEUN < inftl->nb_blocks) {
for (block = 0; block < inftl->EraseSize/SECTORSIZE; block ++) {
- if ((BlockMap[block] != 0xffff) || BlockDeleted[block])
+ if ((BlockMap[block] != BLOCK_NIL) ||
+ BlockDeleted[block])
continue;
if (inftl_read_oob(mtd, (thisEUN * inftl->EraseSize)
@@ -525,7 +526,7 @@ static inline u16 INFTL_findwriteunit(struct INFTLrecord *inftl, unsigned block)
if (!silly--) {
printk(KERN_WARNING "INFTL: infinite loop in "
"Virtual Unit Chain 0x%x\n", thisVUC);
- return 0xffff;
+ return BLOCK_NIL;
}
/* Skip to next block in chain */
@@ -549,7 +550,7 @@ hitused:
* waiting to be picked up. We're going to have to fold
* a chain to make room.
*/
- thisEUN = INFTL_makefreeblock(inftl, 0xffff);
+ thisEUN = INFTL_makefreeblock(inftl, BLOCK_NIL);
/*
* Hopefully we free something, lets try again.
@@ -631,7 +632,7 @@ hitused:
printk(KERN_WARNING "INFTL: error folding to make room for Virtual "
"Unit Chain 0x%x\n", thisVUC);
- return 0xffff;
+ return BLOCK_NIL;
}
/*
diff --git a/drivers/mtd/maps/Kconfig b/drivers/mtd/maps/Kconfig
index 82923bd2d9c..7a58bd5522f 100644
--- a/drivers/mtd/maps/Kconfig
+++ b/drivers/mtd/maps/Kconfig
@@ -105,15 +105,6 @@ config MSP_FLASH_MAP_LIMIT
default "0x02000000"
depends on MSP_FLASH_MAP_LIMIT_32M
-config MTD_PMC_MSP_RAMROOT
- tristate "Embedded RAM block device for root on PMC-Sierra MSP"
- depends on PMC_MSP_EMBEDDED_ROOTFS && \
- (MTD_BLOCK || MTD_BLOCK_RO) && \
- MTD_RAM
- help
- This provides support for the embedded root file system
- on PMC MSP devices. This memory is mapped as a MTD block device.
-
config MTD_SUN_UFLASH
tristate "Sun Microsystems userflash support"
depends on SPARC && MTD_CFI && PCI
@@ -270,7 +261,7 @@ config MTD_ALCHEMY
config MTD_DILNETPC
tristate "CFI Flash device mapped on DIL/Net PC"
- depends on X86 && MTD_CONCAT && MTD_PARTITIONS && MTD_CFI_INTELEXT
+ depends on X86 && MTD_CONCAT && MTD_PARTITIONS && MTD_CFI_INTELEXT && BROKEN
help
MTD map driver for SSV DIL/Net PC Boards "DNP" and "ADNP".
For details, see <http://www.ssv-embedded.de/ssv/pc104/p169.htm>
@@ -293,13 +284,6 @@ config MTD_L440GX
BE VERY CAREFUL.
-config MTD_SBC8240
- tristate "Flash device on SBC8240"
- depends on MTD_JEDECPROBE && 8260
- help
- Flash access on the SBC8240 board from Wind River. See
- <http://www.windriver.com/products/sbc8240/>
-
config MTD_TQM8XXL
tristate "CFI Flash device mapped on TQM8XXL"
depends on MTD_CFI && TQM8xxL
@@ -501,7 +485,7 @@ config MTD_BFIN_ASYNC
If compiled as a module, it will be called bfin-async-flash.
config MTD_UCLINUX
- tristate "Generic uClinux RAM/ROM filesystem support"
+ bool "Generic uClinux RAM/ROM filesystem support"
depends on MTD_PARTITIONS && MTD_RAM && !MMU
help
Map driver to support image based filesystems for uClinux.
diff --git a/drivers/mtd/maps/Makefile b/drivers/mtd/maps/Makefile
index 2dbc1bec848..5beb0662d72 100644
--- a/drivers/mtd/maps/Makefile
+++ b/drivers/mtd/maps/Makefile
@@ -25,7 +25,6 @@ obj-$(CONFIG_MTD_OCTAGON) += octagon-5066.o
obj-$(CONFIG_MTD_PHYSMAP) += physmap.o
obj-$(CONFIG_MTD_PHYSMAP_OF) += physmap_of.o
obj-$(CONFIG_MTD_PMC_MSP_EVM) += pmcmsp-flash.o
-obj-$(CONFIG_MTD_PMC_MSP_RAMROOT)+= pmcmsp-ramroot.o
obj-$(CONFIG_MTD_PCMCIA) += pcmciamtd.o
obj-$(CONFIG_MTD_RPXLITE) += rpxlite.o
obj-$(CONFIG_MTD_TQM8XXL) += tqm8xxl.o
@@ -51,7 +50,6 @@ obj-$(CONFIG_MTD_UCLINUX) += uclinux.o
obj-$(CONFIG_MTD_NETtel) += nettel.o
obj-$(CONFIG_MTD_SCB2_FLASH) += scb2_flash.o
obj-$(CONFIG_MTD_H720X) += h720x-flash.o
-obj-$(CONFIG_MTD_SBC8240) += sbc8240.o
obj-$(CONFIG_MTD_IXP4XX) += ixp4xx.o
obj-$(CONFIG_MTD_IXP2000) += ixp2000.o
obj-$(CONFIG_MTD_WRSBC8260) += wr_sbc82xx_flash.o
diff --git a/drivers/mtd/maps/bfin-async-flash.c b/drivers/mtd/maps/bfin-async-flash.c
index 576611f605d..365c77b1b87 100644
--- a/drivers/mtd/maps/bfin-async-flash.c
+++ b/drivers/mtd/maps/bfin-async-flash.c
@@ -40,6 +40,9 @@ struct async_state {
uint32_t flash_ambctl0, flash_ambctl1;
uint32_t save_ambctl0, save_ambctl1;
unsigned long irq_flags;
+#ifdef CONFIG_MTD_PARTITIONS
+ struct mtd_partition *parts;
+#endif
};
static void switch_to_flash(struct async_state *state)
@@ -170,6 +173,7 @@ static int __devinit bfin_flash_probe(struct platform_device *pdev)
if (ret > 0) {
pr_devinit(KERN_NOTICE DRIVER_NAME ": Using commandline partition definition\n");
add_mtd_partitions(state->mtd, pdata->parts, ret);
+ state->parts = pdata->parts;
} else if (pdata->nr_parts) {
pr_devinit(KERN_NOTICE DRIVER_NAME ": Using board partition definition\n");
@@ -193,6 +197,7 @@ static int __devexit bfin_flash_remove(struct platform_device *pdev)
gpio_free(state->enet_flash_pin);
#ifdef CONFIG_MTD_PARTITIONS
del_mtd_partitions(state->mtd);
+ kfree(state->parts);
#endif
map_destroy(state->mtd);
kfree(state);
diff --git a/drivers/mtd/maps/integrator-flash.c b/drivers/mtd/maps/integrator-flash.c
index c9681a339a5..2aac41bde8b 100644
--- a/drivers/mtd/maps/integrator-flash.c
+++ b/drivers/mtd/maps/integrator-flash.c
@@ -36,27 +36,31 @@
#include <linux/mtd/mtd.h>
#include <linux/mtd/map.h>
#include <linux/mtd/partitions.h>
+#include <linux/mtd/concat.h>
#include <asm/mach/flash.h>
#include <mach/hardware.h>
#include <asm/system.h>
-#ifdef CONFIG_ARCH_P720T
-#define FLASH_BASE (0x04000000)
-#define FLASH_SIZE (64*1024*1024)
-#endif
+struct armflash_subdev_info {
+ char *name;
+ struct mtd_info *mtd;
+ struct map_info map;
+ struct flash_platform_data *plat;
+};
struct armflash_info {
- struct flash_platform_data *plat;
struct resource *res;
struct mtd_partition *parts;
struct mtd_info *mtd;
- struct map_info map;
+ int nr_subdev;
+ struct armflash_subdev_info subdev[0];
};
static void armflash_set_vpp(struct map_info *map, int on)
{
- struct armflash_info *info = container_of(map, struct armflash_info, map);
+ struct armflash_subdev_info *info =
+ container_of(map, struct armflash_subdev_info, map);
if (info->plat && info->plat->set_vpp)
info->plat->set_vpp(on);
@@ -64,32 +68,17 @@ static void armflash_set_vpp(struct map_info *map, int on)
static const char *probes[] = { "cmdlinepart", "RedBoot", "afs", NULL };
-static int armflash_probe(struct platform_device *dev)
+static int armflash_subdev_probe(struct armflash_subdev_info *subdev,
+ struct resource *res)
{
- struct flash_platform_data *plat = dev->dev.platform_data;
- struct resource *res = dev->resource;
- unsigned int size = res->end - res->start + 1;
- struct armflash_info *info;
- int err;
+ struct flash_platform_data *plat = subdev->plat;
+ resource_size_t size = res->end - res->start + 1;
void __iomem *base;
+ int err = 0;
- info = kzalloc(sizeof(struct armflash_info), GFP_KERNEL);
- if (!info) {
- err = -ENOMEM;
- goto out;
- }
-
- info->plat = plat;
- if (plat && plat->init) {
- err = plat->init();
- if (err)
- goto no_resource;
- }
-
- info->res = request_mem_region(res->start, size, "armflash");
- if (!info->res) {
+ if (!request_mem_region(res->start, size, subdev->name)) {
err = -EBUSY;
- goto no_resource;
+ goto out;
}
base = ioremap(res->start, size);
@@ -101,27 +90,140 @@ static int armflash_probe(struct platform_device *dev)
/*
* look for CFI based flash parts fitted to this board
*/
- info->map.size = size;
- info->map.bankwidth = plat->width;
- info->map.phys = res->start;
- info->map.virt = base;
- info->map.name = dev_name(&dev->dev);
- info->map.set_vpp = armflash_set_vpp;
+ subdev->map.size = size;
+ subdev->map.bankwidth = plat->width;
+ subdev->map.phys = res->start;
+ subdev->map.virt = base;
+ subdev->map.name = subdev->name;
+ subdev->map.set_vpp = armflash_set_vpp;
- simple_map_init(&info->map);
+ simple_map_init(&subdev->map);
/*
* Also, the CFI layer automatically works out what size
* of chips we have, and does the necessary identification
* for us automatically.
*/
- info->mtd = do_map_probe(plat->map_name, &info->map);
- if (!info->mtd) {
+ subdev->mtd = do_map_probe(plat->map_name, &subdev->map);
+ if (!subdev->mtd) {
err = -ENXIO;
goto no_device;
}
- info->mtd->owner = THIS_MODULE;
+ subdev->mtd->owner = THIS_MODULE;
+
+ /* Successful? */
+ if (err == 0)
+ return err;
+
+ if (subdev->mtd)
+ map_destroy(subdev->mtd);
+ no_device:
+ iounmap(base);
+ no_mem:
+ release_mem_region(res->start, size);
+ out:
+ return err;
+}
+
+static void armflash_subdev_remove(struct armflash_subdev_info *subdev)
+{
+ if (subdev->mtd)
+ map_destroy(subdev->mtd);
+ if (subdev->map.virt)
+ iounmap(subdev->map.virt);
+ kfree(subdev->name);
+ subdev->name = NULL;
+ release_mem_region(subdev->map.phys, subdev->map.size);
+}
+
+static int armflash_probe(struct platform_device *dev)
+{
+ struct flash_platform_data *plat = dev->dev.platform_data;
+ unsigned int size;
+ struct armflash_info *info;
+ int i, nr, err;
+
+ /* Count the number of devices */
+ for (nr = 0; ; nr++)
+ if (!platform_get_resource(dev, IORESOURCE_MEM, nr))
+ break;
+ if (nr == 0) {
+ err = -ENODEV;
+ goto out;
+ }
+
+ size = sizeof(struct armflash_info) +
+ sizeof(struct armflash_subdev_info) * nr;
+ info = kzalloc(size, GFP_KERNEL);
+ if (!info) {
+ err = -ENOMEM;
+ goto out;
+ }
+
+ if (plat && plat->init) {
+ err = plat->init();
+ if (err)
+ goto no_resource;
+ }
+
+ for (i = 0; i < nr; i++) {
+ struct armflash_subdev_info *subdev = &info->subdev[i];
+ struct resource *res;
+
+ res = platform_get_resource(dev, IORESOURCE_MEM, i);
+ if (!res)
+ break;
+
+ if (nr == 1)
+ /* No MTD concatenation, just use the default name */
+ subdev->name = kstrdup(dev_name(&dev->dev), GFP_KERNEL);
+ else
+ subdev->name = kasprintf(GFP_KERNEL, "%s-%d",
+ dev_name(&dev->dev), i);
+ if (!subdev->name) {
+ err = -ENOMEM;
+ break;
+ }
+ subdev->plat = plat;
+
+ err = armflash_subdev_probe(subdev, res);
+ if (err) {
+ kfree(subdev->name);
+ subdev->name = NULL;
+ break;
+ }
+ }
+ info->nr_subdev = i;
+
+ if (err)
+ goto subdev_err;
+
+ if (info->nr_subdev == 1)
+ info->mtd = info->subdev[0].mtd;
+ else if (info->nr_subdev > 1) {
+#ifdef CONFIG_MTD_CONCAT
+ struct mtd_info *cdev[info->nr_subdev];
+
+ /*
+ * We detected multiple devices. Concatenate them together.
+ */
+ for (i = 0; i < info->nr_subdev; i++)
+ cdev[i] = info->subdev[i].mtd;
+
+ info->mtd = mtd_concat_create(cdev, info->nr_subdev,
+ dev_name(&dev->dev));
+ if (info->mtd == NULL)
+ err = -ENXIO;
+#else
+ printk(KERN_ERR "armflash: multiple devices found but "
+ "MTD concat support disabled.\n");
+ err = -ENXIO;
+#endif
+ }
+
+ if (err < 0)
+ goto cleanup;
err = parse_mtd_partitions(info->mtd, probes, &info->parts, 0);
if (err > 0) {
@@ -131,28 +233,30 @@ static int armflash_probe(struct platform_device *dev)
"mtd partition registration failed: %d\n", err);
}
- if (err == 0)
+ if (err == 0) {
platform_set_drvdata(dev, info);
+ return err;
+ }
/*
- * If we got an error, free all resources.
+ * We got an error, free all resources.
*/
- if (err < 0) {
- if (info->mtd) {
- del_mtd_partitions(info->mtd);
- map_destroy(info->mtd);
- }
- kfree(info->parts);
-
- no_device:
- iounmap(base);
- no_mem:
- release_mem_region(res->start, size);
- no_resource:
- if (plat && plat->exit)
- plat->exit();
- kfree(info);
+ cleanup:
+ if (info->mtd) {
+ del_mtd_partitions(info->mtd);
+#ifdef CONFIG_MTD_CONCAT
+ if (info->mtd != info->subdev[0].mtd)
+ mtd_concat_destroy(info->mtd);
+#endif
}
+ kfree(info->parts);
+ subdev_err:
+ for (i = info->nr_subdev - 1; i >= 0; i--)
+ armflash_subdev_remove(&info->subdev[i]);
+ no_resource:
+ if (plat && plat->exit)
+ plat->exit();
+ kfree(info);
out:
return err;
}
@@ -160,22 +264,26 @@ static int armflash_probe(struct platform_device *dev)
static int armflash_remove(struct platform_device *dev)
{
struct armflash_info *info = platform_get_drvdata(dev);
+ struct flash_platform_data *plat = dev->dev.platform_data;
+ int i;
platform_set_drvdata(dev, NULL);
if (info) {
if (info->mtd) {
del_mtd_partitions(info->mtd);
- map_destroy(info->mtd);
+#ifdef CONFIG_MTD_CONCAT
+ if (info->mtd != info->subdev[0].mtd)
+ mtd_concat_destroy(info->mtd);
+#endif
}
kfree(info->parts);
- iounmap(info->map.virt);
- release_resource(info->res);
- kfree(info->res);
+ for (i = info->nr_subdev - 1; i >= 0; i--)
+ armflash_subdev_remove(&info->subdev[i]);
- if (info->plat && info->plat->exit)
- info->plat->exit();
+ if (plat && plat->exit)
+ plat->exit();
kfree(info);
}
diff --git a/drivers/mtd/maps/physmap.c b/drivers/mtd/maps/physmap.c
index 29a90115735..380648e9051 100644
--- a/drivers/mtd/maps/physmap.c
+++ b/drivers/mtd/maps/physmap.c
@@ -195,42 +195,6 @@ err_out:
}
#ifdef CONFIG_PM
-static int physmap_flash_suspend(struct platform_device *dev, pm_message_t state)
-{
- struct physmap_flash_info *info = platform_get_drvdata(dev);
- int ret = 0;
- int i;
-
- for (i = 0; i < MAX_RESOURCES && info->mtd[i]; i++)
- if (info->mtd[i]->suspend) {
- ret = info->mtd[i]->suspend(info->mtd[i]);
- if (ret)
- goto fail;
- }
-
- return 0;
-fail:
- for (--i; i >= 0; --i)
- if (info->mtd[i]->suspend) {
- BUG_ON(!info->mtd[i]->resume);
- info->mtd[i]->resume(info->mtd[i]);
- }
-
- return ret;
-}
-
-static int physmap_flash_resume(struct platform_device *dev)
-{
- struct physmap_flash_info *info = platform_get_drvdata(dev);
- int i;
-
- for (i = 0; i < MAX_RESOURCES && info->mtd[i]; i++)
- if (info->mtd[i]->resume)
- info->mtd[i]->resume(info->mtd[i]);
-
- return 0;
-}
-
static void physmap_flash_shutdown(struct platform_device *dev)
{
struct physmap_flash_info *info = platform_get_drvdata(dev);
@@ -242,16 +206,12 @@ static void physmap_flash_shutdown(struct platform_device *dev)
info->mtd[i]->resume(info->mtd[i]);
}
#else
-#define physmap_flash_suspend NULL
-#define physmap_flash_resume NULL
#define physmap_flash_shutdown NULL
#endif
static struct platform_driver physmap_flash_driver = {
.probe = physmap_flash_probe,
.remove = physmap_flash_remove,
- .suspend = physmap_flash_suspend,
- .resume = physmap_flash_resume,
.shutdown = physmap_flash_shutdown,
.driver = {
.name = "physmap-flash",
diff --git a/drivers/mtd/maps/physmap_of.c b/drivers/mtd/maps/physmap_of.c
index c83a60fada5..39d357b2eb4 100644
--- a/drivers/mtd/maps/physmap_of.c
+++ b/drivers/mtd/maps/physmap_of.c
@@ -20,16 +20,23 @@
#include <linux/mtd/mtd.h>
#include <linux/mtd/map.h>
#include <linux/mtd/partitions.h>
+#include <linux/mtd/concat.h>
#include <linux/of.h>
#include <linux/of_platform.h>
+struct of_flash_list {
+ struct mtd_info *mtd;
+ struct map_info map;
+ struct resource *res;
+};
+
struct of_flash {
- struct mtd_info *mtd;
- struct map_info map;
- struct resource *res;
+ struct mtd_info *cmtd;
#ifdef CONFIG_MTD_PARTITIONS
struct mtd_partition *parts;
#endif
+ int list_size; /* number of elements in of_flash_list */
+ struct of_flash_list list[0];
};
#ifdef CONFIG_MTD_PARTITIONS
@@ -88,30 +95,44 @@ static int parse_obsolete_partitions(struct of_device *dev,
static int of_flash_remove(struct of_device *dev)
{
struct of_flash *info;
+ int i;
info = dev_get_drvdata(&dev->dev);
if (!info)
return 0;
dev_set_drvdata(&dev->dev, NULL);
- if (info->mtd) {
+#ifdef CONFIG_MTD_CONCAT
+ if (info->cmtd != info->list[0].mtd) {
+ del_mtd_device(info->cmtd);
+ mtd_concat_destroy(info->cmtd);
+ }
+#endif
+
+ if (info->cmtd) {
if (OF_FLASH_PARTS(info)) {
- del_mtd_partitions(info->mtd);
+ del_mtd_partitions(info->cmtd);
kfree(OF_FLASH_PARTS(info));
} else {
- del_mtd_device(info->mtd);
+ del_mtd_device(info->cmtd);
}
- map_destroy(info->mtd);
}
- if (info->map.virt)
- iounmap(info->map.virt);
+ for (i = 0; i < info->list_size; i++) {
+ if (info->list[i].mtd)
+ map_destroy(info->list[i].mtd);
- if (info->res) {
- release_resource(info->res);
- kfree(info->res);
+ if (info->list[i].map.virt)
+ iounmap(info->list[i].map.virt);
+
+ if (info->list[i].res) {
+ release_resource(info->list[i].res);
+ kfree(info->list[i].res);
+ }
}
+ kfree(info);
+
return 0;
}
@@ -164,68 +185,130 @@ static int __devinit of_flash_probe(struct of_device *dev,
const char *probe_type = match->data;
const u32 *width;
int err;
-
- err = -ENXIO;
- if (of_address_to_resource(dp, 0, &res)) {
- dev_err(&dev->dev, "Can't get IO address from device tree\n");
+ int i;
+ int count;
+ const u32 *p;
+ int reg_tuple_size;
+ struct mtd_info **mtd_list = NULL;
+
+ reg_tuple_size = (of_n_addr_cells(dp) + of_n_size_cells(dp)) * sizeof(u32);
+
+ /*
+ * Get number of "reg" tuples. Scan for MTD devices on area's
+ * described by each "reg" region. This makes it possible (including
+ * the concat support) to support the Intel P30 48F4400 chips which
+ * consists internally of 2 non-identical NOR chips on one die.
+ */
+ p = of_get_property(dp, "reg", &count);
+ if (count % reg_tuple_size != 0) {
+ dev_err(&dev->dev, "Malformed reg property on %s\n",
+ dev->node->full_name);
+ err = -EINVAL;
goto err_out;
}
-
- dev_dbg(&dev->dev, "of_flash device: %.8llx-%.8llx\n",
- (unsigned long long)res.start, (unsigned long long)res.end);
+ count /= reg_tuple_size;
err = -ENOMEM;
- info = kzalloc(sizeof(*info), GFP_KERNEL);
+ info = kzalloc(sizeof(struct of_flash) +
+ sizeof(struct of_flash_list) * count, GFP_KERNEL);
+ if (!info)
+ goto err_out;
+
+ mtd_list = kzalloc(sizeof(struct mtd_info) * count, GFP_KERNEL);
if (!info)
goto err_out;
dev_set_drvdata(&dev->dev, info);
- err = -EBUSY;
- info->res = request_mem_region(res.start, res.end - res.start + 1,
- dev_name(&dev->dev));
- if (!info->res)
- goto err_out;
+ for (i = 0; i < count; i++) {
+ err = -ENXIO;
+ if (of_address_to_resource(dp, i, &res)) {
+ dev_err(&dev->dev, "Can't get IO address from device"
+ " tree\n");
+ goto err_out;
+ }
- err = -ENXIO;
- width = of_get_property(dp, "bank-width", NULL);
- if (!width) {
- dev_err(&dev->dev, "Can't get bank width from device tree\n");
- goto err_out;
- }
+ dev_dbg(&dev->dev, "of_flash device: %.8llx-%.8llx\n",
+ (unsigned long long)res.start,
+ (unsigned long long)res.end);
+
+ err = -EBUSY;
+ info->list[i].res = request_mem_region(res.start, res.end -
+ res.start + 1,
+ dev_name(&dev->dev));
+ if (!info->list[i].res)
+ goto err_out;
+
+ err = -ENXIO;
+ width = of_get_property(dp, "bank-width", NULL);
+ if (!width) {
+ dev_err(&dev->dev, "Can't get bank width from device"
+ " tree\n");
+ goto err_out;
+ }
- info->map.name = dev_name(&dev->dev);
- info->map.phys = res.start;
- info->map.size = res.end - res.start + 1;
- info->map.bankwidth = *width;
+ info->list[i].map.name = dev_name(&dev->dev);
+ info->list[i].map.phys = res.start;
+ info->list[i].map.size = res.end - res.start + 1;
+ info->list[i].map.bankwidth = *width;
+
+ err = -ENOMEM;
+ info->list[i].map.virt = ioremap(info->list[i].map.phys,
+ info->list[i].map.size);
+ if (!info->list[i].map.virt) {
+ dev_err(&dev->dev, "Failed to ioremap() flash"
+ " region\n");
+ goto err_out;
+ }
- err = -ENOMEM;
- info->map.virt = ioremap(info->map.phys, info->map.size);
- if (!info->map.virt) {
- dev_err(&dev->dev, "Failed to ioremap() flash region\n");
- goto err_out;
- }
+ simple_map_init(&info->list[i].map);
- simple_map_init(&info->map);
+ if (probe_type) {
+ info->list[i].mtd = do_map_probe(probe_type,
+ &info->list[i].map);
+ } else {
+ info->list[i].mtd = obsolete_probe(dev,
+ &info->list[i].map);
+ }
+ mtd_list[i] = info->list[i].mtd;
- if (probe_type)
- info->mtd = do_map_probe(probe_type, &info->map);
- else
- info->mtd = obsolete_probe(dev, &info->map);
+ err = -ENXIO;
+ if (!info->list[i].mtd) {
+ dev_err(&dev->dev, "do_map_probe() failed\n");
+ goto err_out;
+ } else {
+ info->list_size++;
+ }
+ info->list[i].mtd->owner = THIS_MODULE;
+ info->list[i].mtd->dev.parent = &dev->dev;
+ }
- err = -ENXIO;
- if (!info->mtd) {
- dev_err(&dev->dev, "do_map_probe() failed\n");
- goto err_out;
+ err = 0;
+ if (info->list_size == 1) {
+ info->cmtd = info->list[0].mtd;
+ } else if (info->list_size > 1) {
+ /*
+ * We detected multiple devices. Concatenate them together.
+ */
+#ifdef CONFIG_MTD_CONCAT
+ info->cmtd = mtd_concat_create(mtd_list, info->list_size,
+ dev_name(&dev->dev));
+ if (info->cmtd == NULL)
+ err = -ENXIO;
+#else
+ printk(KERN_ERR "physmap_of: multiple devices "
+ "found but MTD concat support disabled.\n");
+ err = -ENXIO;
+#endif
}
- info->mtd->owner = THIS_MODULE;
- info->mtd->dev.parent = &dev->dev;
+ if (err)
+ goto err_out;
#ifdef CONFIG_MTD_PARTITIONS
/* First look for RedBoot table or partitions on the command
* line, these take precedence over device tree information */
- err = parse_mtd_partitions(info->mtd, part_probe_types,
- &info->parts, 0);
+ err = parse_mtd_partitions(info->cmtd, part_probe_types,
+ &info->parts, 0);
if (err < 0)
return err;
@@ -244,15 +327,19 @@ static int __devinit of_flash_probe(struct of_device *dev,
}
if (err > 0)
- add_mtd_partitions(info->mtd, info->parts, err);
+ add_mtd_partitions(info->cmtd, info->parts, err);
else
#endif
- add_mtd_device(info->mtd);
+ add_mtd_device(info->cmtd);
+
+ kfree(mtd_list);
return 0;
err_out:
+ kfree(mtd_list);
of_flash_remove(dev);
+
return err;
}
diff --git a/drivers/mtd/maps/pmcmsp-ramroot.c b/drivers/mtd/maps/pmcmsp-ramroot.c
deleted file mode 100644
index 30de5c0c09a..00000000000
--- a/drivers/mtd/maps/pmcmsp-ramroot.c
+++ /dev/null
@@ -1,104 +0,0 @@
-/*
- * Mapping of the rootfs in a physical region of memory
- *
- * Copyright (C) 2005-2007 PMC-Sierra Inc.
- * Author: Andrew Hughes, Andrew_Hughes@pmc-sierra.com
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License as published by the
- * Free Software Foundation; either version 2 of the License, or (at your
- * option) any later version.
- *
- * THIS SOFTWARE IS PROVIDED ``AS IS'' AND ANY EXPRESS OR IMPLIED
- * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
- * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN
- * NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
- * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
- * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF
- * USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
- * ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
- * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- *
- * You should have received a copy of the GNU General Public License along
- * with this program; if not, write to the Free Software Foundation, Inc.,
- * 675 Mass Ave, Cambridge, MA 02139, USA.
- */
-
-#include <linux/module.h>
-#include <linux/types.h>
-#include <linux/kernel.h>
-#include <linux/init.h>
-#include <linux/slab.h>
-#include <linux/fs.h>
-#include <linux/root_dev.h>
-#include <linux/mtd/mtd.h>
-#include <linux/mtd/map.h>
-
-#include <asm/io.h>
-
-#include <msp_prom.h>
-
-static struct mtd_info *rr_mtd;
-
-struct map_info rr_map = {
- .name = "ramroot",
- .bankwidth = 4,
-};
-
-static int __init init_rrmap(void)
-{
- void *ramroot_start;
- unsigned long ramroot_size;
-
- /* Check for supported rootfs types */
- if (get_ramroot(&ramroot_start, &ramroot_size)) {
- rr_map.phys = CPHYSADDR(ramroot_start);
- rr_map.size = ramroot_size;
-
- printk(KERN_NOTICE
- "PMC embedded root device: 0x%08lx @ 0x%08lx\n",
- rr_map.size, (unsigned long)rr_map.phys);
- } else {
- printk(KERN_ERR
- "init_rrmap: no supported embedded rootfs detected!\n");
- return -ENXIO;
- }
-
- /* Map rootfs to I/O space for block device driver */
- rr_map.virt = ioremap(rr_map.phys, rr_map.size);
- if (!rr_map.virt) {
- printk(KERN_ERR "Failed to ioremap\n");
- return -EIO;
- }
-
- simple_map_init(&rr_map);
-
- rr_mtd = do_map_probe("map_ram", &rr_map);
- if (rr_mtd) {
- rr_mtd->owner = THIS_MODULE;
-
- add_mtd_device(rr_mtd);
-
- return 0;
- }
-
- iounmap(rr_map.virt);
- return -ENXIO;
-}
-
-static void __exit cleanup_rrmap(void)
-{
- del_mtd_device(rr_mtd);
- map_destroy(rr_mtd);
-
- iounmap(rr_map.virt);
- rr_map.virt = NULL;
-}
-
-MODULE_AUTHOR("PMC-Sierra, Inc");
-MODULE_DESCRIPTION("MTD map driver for embedded PMC-Sierra MSP filesystem");
-MODULE_LICENSE("GPL");
-
-module_init(init_rrmap);
-module_exit(cleanup_rrmap);
diff --git a/drivers/mtd/maps/pxa2xx-flash.c b/drivers/mtd/maps/pxa2xx-flash.c
index 572d32fdf38..643aa06b599 100644
--- a/drivers/mtd/maps/pxa2xx-flash.c
+++ b/drivers/mtd/maps/pxa2xx-flash.c
@@ -140,24 +140,6 @@ static int __devexit pxa2xx_flash_remove(struct platform_device *dev)
}
#ifdef CONFIG_PM
-static int pxa2xx_flash_suspend(struct platform_device *dev, pm_message_t state)
-{
- struct pxa2xx_flash_info *info = platform_get_drvdata(dev);
- int ret = 0;
-
- if (info->mtd && info->mtd->suspend)
- ret = info->mtd->suspend(info->mtd);
- return ret;
-}
-
-static int pxa2xx_flash_resume(struct platform_device *dev)
-{
- struct pxa2xx_flash_info *info = platform_get_drvdata(dev);
-
- if (info->mtd && info->mtd->resume)
- info->mtd->resume(info->mtd);
- return 0;
-}
static void pxa2xx_flash_shutdown(struct platform_device *dev)
{
struct pxa2xx_flash_info *info = platform_get_drvdata(dev);
@@ -166,8 +148,6 @@ static void pxa2xx_flash_shutdown(struct platform_device *dev)
info->mtd->resume(info->mtd);
}
#else
-#define pxa2xx_flash_suspend NULL
-#define pxa2xx_flash_resume NULL
#define pxa2xx_flash_shutdown NULL
#endif
@@ -178,8 +158,6 @@ static struct platform_driver pxa2xx_flash_driver = {
},
.probe = pxa2xx_flash_probe,
.remove = __devexit_p(pxa2xx_flash_remove),
- .suspend = pxa2xx_flash_suspend,
- .resume = pxa2xx_flash_resume,
.shutdown = pxa2xx_flash_shutdown,
};
diff --git a/drivers/mtd/maps/rbtx4939-flash.c b/drivers/mtd/maps/rbtx4939-flash.c
index d39f0adac84..83ed64512c5 100644
--- a/drivers/mtd/maps/rbtx4939-flash.c
+++ b/drivers/mtd/maps/rbtx4939-flash.c
@@ -145,25 +145,6 @@ err_out:
}
#ifdef CONFIG_PM
-static int rbtx4939_flash_suspend(struct platform_device *dev,
- pm_message_t state)
-{
- struct rbtx4939_flash_info *info = platform_get_drvdata(dev);
-
- if (info->mtd->suspend)
- return info->mtd->suspend(info->mtd);
- return 0;
-}
-
-static int rbtx4939_flash_resume(struct platform_device *dev)
-{
- struct rbtx4939_flash_info *info = platform_get_drvdata(dev);
-
- if (info->mtd->resume)
- info->mtd->resume(info->mtd);
- return 0;
-}
-
static void rbtx4939_flash_shutdown(struct platform_device *dev)
{
struct rbtx4939_flash_info *info = platform_get_drvdata(dev);
@@ -173,16 +154,12 @@ static void rbtx4939_flash_shutdown(struct platform_device *dev)
info->mtd->resume(info->mtd);
}
#else
-#define rbtx4939_flash_suspend NULL
-#define rbtx4939_flash_resume NULL
#define rbtx4939_flash_shutdown NULL
#endif
static struct platform_driver rbtx4939_flash_driver = {
.probe = rbtx4939_flash_probe,
.remove = rbtx4939_flash_remove,
- .suspend = rbtx4939_flash_suspend,
- .resume = rbtx4939_flash_resume,
.shutdown = rbtx4939_flash_shutdown,
.driver = {
.name = "rbtx4939-flash",
diff --git a/drivers/mtd/maps/sa1100-flash.c b/drivers/mtd/maps/sa1100-flash.c
index 05e9362dc7f..c6210f5118d 100644
--- a/drivers/mtd/maps/sa1100-flash.c
+++ b/drivers/mtd/maps/sa1100-flash.c
@@ -415,25 +415,6 @@ static int __exit sa1100_mtd_remove(struct platform_device *pdev)
}
#ifdef CONFIG_PM
-static int sa1100_mtd_suspend(struct platform_device *dev, pm_message_t state)
-{
- struct sa_info *info = platform_get_drvdata(dev);
- int ret = 0;
-
- if (info)
- ret = info->mtd->suspend(info->mtd);
-
- return ret;
-}
-
-static int sa1100_mtd_resume(struct platform_device *dev)
-{
- struct sa_info *info = platform_get_drvdata(dev);
- if (info)
- info->mtd->resume(info->mtd);
- return 0;
-}
-
static void sa1100_mtd_shutdown(struct platform_device *dev)
{
struct sa_info *info = platform_get_drvdata(dev);
@@ -441,16 +422,12 @@ static void sa1100_mtd_shutdown(struct platform_device *dev)
info->mtd->resume(info->mtd);
}
#else
-#define sa1100_mtd_suspend NULL
-#define sa1100_mtd_resume NULL
#define sa1100_mtd_shutdown NULL
#endif
static struct platform_driver sa1100_mtd_driver = {
.probe = sa1100_mtd_probe,
.remove = __exit_p(sa1100_mtd_remove),
- .suspend = sa1100_mtd_suspend,
- .resume = sa1100_mtd_resume,
.shutdown = sa1100_mtd_shutdown,
.driver = {
.name = "sa1100-mtd",
diff --git a/drivers/mtd/maps/sbc8240.c b/drivers/mtd/maps/sbc8240.c
deleted file mode 100644
index d5374cdcb16..00000000000
--- a/drivers/mtd/maps/sbc8240.c
+++ /dev/null
@@ -1,250 +0,0 @@
-/*
- * Handle mapping of the flash memory access routines on the SBC8240 board.
- *
- * Carolyn Smith, Tektronix, Inc.
- *
- * This code is GPLed
- */
-
-/*
- * The SBC8240 has 2 flash banks.
- * Bank 0 is a 512 KiB AMD AM29F040B; 8 x 64 KiB sectors.
- * It contains the U-Boot code (7 sectors) and the environment (1 sector).
- * Bank 1 is 4 x 1 MiB AMD AM29LV800BT; 15 x 64 KiB sectors, 1 x 32 KiB sector,
- * 2 x 8 KiB sectors, 1 x 16 KiB sectors.
- * Both parts are JEDEC compatible.
- */
-
-#include <linux/module.h>
-#include <linux/types.h>
-#include <linux/kernel.h>
-#include <asm/io.h>
-
-#include <linux/mtd/mtd.h>
-#include <linux/mtd/map.h>
-#include <linux/mtd/cfi.h>
-
-#ifdef CONFIG_MTD_PARTITIONS
-#include <linux/mtd/partitions.h>
-#endif
-
-#define DEBUG
-
-#ifdef DEBUG
-# define debugk(fmt,args...) printk(fmt ,##args)
-#else
-# define debugk(fmt,args...)
-#endif
-
-
-#define WINDOW_ADDR0 0xFFF00000 /* 512 KiB */
-#define WINDOW_SIZE0 0x00080000
-#define BUSWIDTH0 1
-
-#define WINDOW_ADDR1 0xFF000000 /* 4 MiB */
-#define WINDOW_SIZE1 0x00400000
-#define BUSWIDTH1 8
-
-#define MSG_PREFIX "sbc8240:" /* prefix for our printk()'s */
-#define MTDID "sbc8240-%d" /* for mtdparts= partitioning */
-
-
-static struct map_info sbc8240_map[2] = {
- {
- .name = "sbc8240 Flash Bank #0",
- .size = WINDOW_SIZE0,
- .bankwidth = BUSWIDTH0,
- },
- {
- .name = "sbc8240 Flash Bank #1",
- .size = WINDOW_SIZE1,
- .bankwidth = BUSWIDTH1,
- }
-};
-
-#define NUM_FLASH_BANKS ARRAY_SIZE(sbc8240_map)
-
-/*
- * The following defines the partition layout of SBC8240 boards.
- *
- * See include/linux/mtd/partitions.h for definition of the
- * mtd_partition structure.
- *
- * The *_max_flash_size is the maximum possible mapped flash size
- * which is not necessarily the actual flash size. It must correspond
- * to the value specified in the mapping definition defined by the
- * "struct map_desc *_io_desc" for the corresponding machine.
- */
-
-#ifdef CONFIG_MTD_PARTITIONS
-
-static struct mtd_partition sbc8240_uboot_partitions [] = {
- /* Bank 0 */
- {
- .name = "U-boot", /* U-Boot Firmware */
- .offset = 0,
- .size = 0x00070000, /* 7 x 64 KiB sectors */
- .mask_flags = MTD_WRITEABLE, /* force read-only */
- },
- {
- .name = "environment", /* U-Boot environment */
- .offset = 0x00070000,
- .size = 0x00010000, /* 1 x 64 KiB sector */
- },
-};
-
-static struct mtd_partition sbc8240_fs_partitions [] = {
- {
- .name = "jffs", /* JFFS filesystem */
- .offset = 0,
- .size = 0x003C0000, /* 4 * 15 * 64KiB */
- },
- {
- .name = "tmp32",
- .offset = 0x003C0000,
- .size = 0x00020000, /* 4 * 32KiB */
- },
- {
- .name = "tmp8a",
- .offset = 0x003E0000,
- .size = 0x00008000, /* 4 * 8KiB */
- },
- {
- .name = "tmp8b",
- .offset = 0x003E8000,
- .size = 0x00008000, /* 4 * 8KiB */
- },
- {
- .name = "tmp16",
- .offset = 0x003F0000,
- .size = 0x00010000, /* 4 * 16KiB */
- }
-};
-
-/* trivial struct to describe partition information */
-struct mtd_part_def
-{
- int nums;
- unsigned char *type;
- struct mtd_partition* mtd_part;
-};
-
-static struct mtd_info *sbc8240_mtd[NUM_FLASH_BANKS];
-static struct mtd_part_def sbc8240_part_banks[NUM_FLASH_BANKS];
-
-
-#endif /* CONFIG_MTD_PARTITIONS */
-
-
-static int __init init_sbc8240_mtd (void)
-{
- static struct _cjs {
- u_long addr;
- u_long size;
- } pt[NUM_FLASH_BANKS] = {
- {
- .addr = WINDOW_ADDR0,
- .size = WINDOW_SIZE0
- },
- {
- .addr = WINDOW_ADDR1,
- .size = WINDOW_SIZE1
- },
- };
-
- int devicesfound = 0;
- int i,j;
-
- for (i = 0; i < NUM_FLASH_BANKS; i++) {
- printk (KERN_NOTICE MSG_PREFIX
- "Probing 0x%08lx at 0x%08lx\n", pt[i].size, pt[i].addr);
-
- sbc8240_map[i].map_priv_1 =
- (unsigned long) ioremap (pt[i].addr, pt[i].size);
- if (!sbc8240_map[i].map_priv_1) {
- printk (MSG_PREFIX "failed to ioremap\n");
- for (j = 0; j < i; j++) {
- iounmap((void *) sbc8240_map[j].map_priv_1);
- sbc8240_map[j].map_priv_1 = 0;
- }
- return -EIO;
- }
- simple_map_init(&sbc8240_mtd[i]);
-
- sbc8240_mtd[i] = do_map_probe("jedec_probe", &sbc8240_map[i]);
-
- if (sbc8240_mtd[i]) {
- sbc8240_mtd[i]->module = THIS_MODULE;
- devicesfound++;
- } else {
- if (sbc8240_map[i].map_priv_1) {
- iounmap((void *) sbc8240_map[i].map_priv_1);
- sbc8240_map[i].map_priv_1 = 0;
- }
- }
- }
-
- if (!devicesfound) {
- printk(KERN_NOTICE MSG_PREFIX
- "No suppported flash chips found!\n");
- return -ENXIO;
- }
-
-#ifdef CONFIG_MTD_PARTITIONS
- sbc8240_part_banks[0].mtd_part = sbc8240_uboot_partitions;
- sbc8240_part_banks[0].type = "static image";
- sbc8240_part_banks[0].nums = ARRAY_SIZE(sbc8240_uboot_partitions);
- sbc8240_part_banks[1].mtd_part = sbc8240_fs_partitions;
- sbc8240_part_banks[1].type = "static file system";
- sbc8240_part_banks[1].nums = ARRAY_SIZE(sbc8240_fs_partitions);
-
- for (i = 0; i < NUM_FLASH_BANKS; i++) {
-
- if (!sbc8240_mtd[i]) continue;
- if (sbc8240_part_banks[i].nums == 0) {
- printk (KERN_NOTICE MSG_PREFIX
- "No partition info available, registering whole device\n");
- add_mtd_device(sbc8240_mtd[i]);
- } else {
- printk (KERN_NOTICE MSG_PREFIX
- "Using %s partition definition\n", sbc8240_part_banks[i].mtd_part->name);
- add_mtd_partitions (sbc8240_mtd[i],
- sbc8240_part_banks[i].mtd_part,
- sbc8240_part_banks[i].nums);
- }
- }
-#else
- printk(KERN_NOTICE MSG_PREFIX
- "Registering %d flash banks at once\n", devicesfound);
-
- for (i = 0; i < devicesfound; i++) {
- add_mtd_device(sbc8240_mtd[i]);
- }
-#endif /* CONFIG_MTD_PARTITIONS */
-
- return devicesfound == 0 ? -ENXIO : 0;
-}
-
-static void __exit cleanup_sbc8240_mtd (void)
-{
- int i;
-
- for (i = 0; i < NUM_FLASH_BANKS; i++) {
- if (sbc8240_mtd[i]) {
- del_mtd_device (sbc8240_mtd[i]);
- map_destroy (sbc8240_mtd[i]);
- }
- if (sbc8240_map[i].map_priv_1) {
- iounmap ((void *) sbc8240_map[i].map_priv_1);
- sbc8240_map[i].map_priv_1 = 0;
- }
- }
-}
-
-module_init (init_sbc8240_mtd);
-module_exit (cleanup_sbc8240_mtd);
-
-MODULE_LICENSE ("GPL");
-MODULE_AUTHOR ("Carolyn Smith <carolyn.smith@tektronix.com>");
-MODULE_DESCRIPTION ("MTD map driver for SBC8240 boards");
-
diff --git a/drivers/mtd/maps/uclinux.c b/drivers/mtd/maps/uclinux.c
index 81756e39771..d4314fb8821 100644
--- a/drivers/mtd/maps/uclinux.c
+++ b/drivers/mtd/maps/uclinux.c
@@ -22,15 +22,19 @@
/****************************************************************************/
+extern char _ebss;
+
struct map_info uclinux_ram_map = {
.name = "RAM",
+ .phys = (unsigned long)&_ebss,
+ .size = 0,
};
-struct mtd_info *uclinux_ram_mtdinfo;
+static struct mtd_info *uclinux_ram_mtdinfo;
/****************************************************************************/
-struct mtd_partition uclinux_romfs[] = {
+static struct mtd_partition uclinux_romfs[] = {
{ .name = "ROMfs" }
};
@@ -38,7 +42,7 @@ struct mtd_partition uclinux_romfs[] = {
/****************************************************************************/
-int uclinux_point(struct mtd_info *mtd, loff_t from, size_t len,
+static int uclinux_point(struct mtd_info *mtd, loff_t from, size_t len,
size_t *retlen, void **virt, resource_size_t *phys)
{
struct map_info *map = mtd->priv;
@@ -55,12 +59,10 @@ static int __init uclinux_mtd_init(void)
{
struct mtd_info *mtd;
struct map_info *mapp;
- extern char _ebss;
- unsigned long addr = (unsigned long) &_ebss;
mapp = &uclinux_ram_map;
- mapp->phys = addr;
- mapp->size = PAGE_ALIGN(ntohl(*((unsigned long *)(addr + 8))));
+ if (!mapp->size)
+ mapp->size = PAGE_ALIGN(ntohl(*((unsigned long *)(mapp->phys + 8))));
mapp->bankwidth = 4;
printk("uclinux[mtd]: RAM probe address=0x%x size=0x%x\n",
diff --git a/drivers/mtd/mtd_blkdevs.c b/drivers/mtd/mtd_blkdevs.c
index aaac3b6800b..7baba40c1ed 100644
--- a/drivers/mtd/mtd_blkdevs.c
+++ b/drivers/mtd/mtd_blkdevs.c
@@ -144,7 +144,7 @@ static int blktrans_open(struct block_device *bdev, fmode_t mode)
struct mtd_blktrans_ops *tr = dev->tr;
int ret = -ENODEV;
- if (!try_module_get(dev->mtd->owner))
+ if (!get_mtd_device(NULL, dev->mtd->index))
goto out;
if (!try_module_get(tr->owner))
@@ -158,7 +158,7 @@ static int blktrans_open(struct block_device *bdev, fmode_t mode)
ret = 0;
if (tr->open && (ret = tr->open(dev))) {
dev->mtd->usecount--;
- module_put(dev->mtd->owner);
+ put_mtd_device(dev->mtd);
out_tr:
module_put(tr->owner);
}
@@ -177,7 +177,7 @@ static int blktrans_release(struct gendisk *disk, fmode_t mode)
if (!ret) {
dev->mtd->usecount--;
- module_put(dev->mtd->owner);
+ put_mtd_device(dev->mtd);
module_put(tr->owner);
}
@@ -291,7 +291,7 @@ int add_mtd_blktrans_dev(struct mtd_blktrans_dev *new)
gd->private_data = new;
new->blkcore_priv = gd;
gd->queue = tr->blkcore_priv->rq;
- gd->driverfs_dev = new->mtd->dev.parent;
+ gd->driverfs_dev = &new->mtd->dev;
if (new->readonly)
set_disk_ro(gd, 1);
diff --git a/drivers/mtd/mtdblock.c b/drivers/mtd/mtdblock.c
index 208c6faa035..77db5ce24d9 100644
--- a/drivers/mtd/mtdblock.c
+++ b/drivers/mtd/mtdblock.c
@@ -29,6 +29,8 @@ static struct mtdblk_dev {
enum { STATE_EMPTY, STATE_CLEAN, STATE_DIRTY } cache_state;
} *mtdblks[MAX_MTD_DEVICES];
+static struct mutex mtdblks_lock;
+
/*
* Cache stuff...
*
@@ -270,15 +272,19 @@ static int mtdblock_open(struct mtd_blktrans_dev *mbd)
DEBUG(MTD_DEBUG_LEVEL1,"mtdblock_open\n");
+ mutex_lock(&mtdblks_lock);
if (mtdblks[dev]) {
mtdblks[dev]->count++;
+ mutex_unlock(&mtdblks_lock);
return 0;
}
/* OK, it's not open. Create cache info for it */
mtdblk = kzalloc(sizeof(struct mtdblk_dev), GFP_KERNEL);
- if (!mtdblk)
+ if (!mtdblk) {
+ mutex_unlock(&mtdblks_lock);
return -ENOMEM;
+ }
mtdblk->count = 1;
mtdblk->mtd = mtd;
@@ -291,6 +297,7 @@ static int mtdblock_open(struct mtd_blktrans_dev *mbd)
}
mtdblks[dev] = mtdblk;
+ mutex_unlock(&mtdblks_lock);
DEBUG(MTD_DEBUG_LEVEL1, "ok\n");
@@ -304,6 +311,8 @@ static int mtdblock_release(struct mtd_blktrans_dev *mbd)
DEBUG(MTD_DEBUG_LEVEL1, "mtdblock_release\n");
+ mutex_lock(&mtdblks_lock);
+
mutex_lock(&mtdblk->cache_mutex);
write_cached_data(mtdblk);
mutex_unlock(&mtdblk->cache_mutex);
@@ -316,6 +325,9 @@ static int mtdblock_release(struct mtd_blktrans_dev *mbd)
vfree(mtdblk->cache_data);
kfree(mtdblk);
}
+
+ mutex_unlock(&mtdblks_lock);
+
DEBUG(MTD_DEBUG_LEVEL1, "ok\n");
return 0;
@@ -376,6 +388,8 @@ static struct mtd_blktrans_ops mtdblock_tr = {
static int __init init_mtdblock(void)
{
+ mutex_init(&mtdblks_lock);
+
return register_mtd_blktrans(&mtdblock_tr);
}
diff --git a/drivers/mtd/mtdchar.c b/drivers/mtd/mtdchar.c
index 763d3f0a1f4..5b081cb8435 100644
--- a/drivers/mtd/mtdchar.c
+++ b/drivers/mtd/mtdchar.c
@@ -14,6 +14,7 @@
#include <linux/sched.h>
#include <linux/smp_lock.h>
#include <linux/backing-dev.h>
+#include <linux/compat.h>
#include <linux/mtd/mtd.h>
#include <linux/mtd/compatmac.h>
@@ -355,6 +356,100 @@ static int otp_select_filemode(struct mtd_file_info *mfi, int mode)
# define otp_select_filemode(f,m) -EOPNOTSUPP
#endif
+static int mtd_do_writeoob(struct file *file, struct mtd_info *mtd,
+ uint64_t start, uint32_t length, void __user *ptr,
+ uint32_t __user *retp)
+{
+ struct mtd_oob_ops ops;
+ uint32_t retlen;
+ int ret = 0;
+
+ if (!(file->f_mode & FMODE_WRITE))
+ return -EPERM;
+
+ if (length > 4096)
+ return -EINVAL;
+
+ if (!mtd->write_oob)
+ ret = -EOPNOTSUPP;
+ else
+ ret = access_ok(VERIFY_READ, ptr, length) ? 0 : EFAULT;
+
+ if (ret)
+ return ret;
+
+ ops.ooblen = length;
+ ops.ooboffs = start & (mtd->oobsize - 1);
+ ops.datbuf = NULL;
+ ops.mode = MTD_OOB_PLACE;
+
+ if (ops.ooboffs && ops.ooblen > (mtd->oobsize - ops.ooboffs))
+ return -EINVAL;
+
+ ops.oobbuf = kmalloc(length, GFP_KERNEL);
+ if (!ops.oobbuf)
+ return -ENOMEM;
+
+ if (copy_from_user(ops.oobbuf, ptr, length)) {
+ kfree(ops.oobbuf);
+ return -EFAULT;
+ }
+
+ start &= ~((uint64_t)mtd->oobsize - 1);
+ ret = mtd->write_oob(mtd, start, &ops);
+
+ if (ops.oobretlen > 0xFFFFFFFFU)
+ ret = -EOVERFLOW;
+ retlen = ops.oobretlen;
+ if (copy_to_user(retp, &retlen, sizeof(length)))
+ ret = -EFAULT;
+
+ kfree(ops.oobbuf);
+ return ret;
+}
+
+static int mtd_do_readoob(struct mtd_info *mtd, uint64_t start,
+ uint32_t length, void __user *ptr, uint32_t __user *retp)
+{
+ struct mtd_oob_ops ops;
+ int ret = 0;
+
+ if (length > 4096)
+ return -EINVAL;
+
+ if (!mtd->read_oob)
+ ret = -EOPNOTSUPP;
+ else
+ ret = access_ok(VERIFY_WRITE, ptr,
+ length) ? 0 : -EFAULT;
+ if (ret)
+ return ret;
+
+ ops.ooblen = length;
+ ops.ooboffs = start & (mtd->oobsize - 1);
+ ops.datbuf = NULL;
+ ops.mode = MTD_OOB_PLACE;
+
+ if (ops.ooboffs && ops.ooblen > (mtd->oobsize - ops.ooboffs))
+ return -EINVAL;
+
+ ops.oobbuf = kmalloc(length, GFP_KERNEL);
+ if (!ops.oobbuf)
+ return -ENOMEM;
+
+ start &= ~((uint64_t)mtd->oobsize - 1);
+ ret = mtd->read_oob(mtd, start, &ops);
+
+ if (put_user(ops.oobretlen, retp))
+ ret = -EFAULT;
+ else if (ops.oobretlen && copy_to_user(ptr, ops.oobbuf,
+ ops.oobretlen))
+ ret = -EFAULT;
+
+ kfree(ops.oobbuf);
+ return ret;
+}
+
static int mtd_ioctl(struct inode *inode, struct file *file,
u_int cmd, u_long arg)
{
@@ -417,6 +512,7 @@ static int mtd_ioctl(struct inode *inode, struct file *file,
break;
case MEMERASE:
+ case MEMERASE64:
{
struct erase_info *erase;
@@ -427,20 +523,32 @@ static int mtd_ioctl(struct inode *inode, struct file *file,
if (!erase)
ret = -ENOMEM;
else {
- struct erase_info_user einfo;
-
wait_queue_head_t waitq;
DECLARE_WAITQUEUE(wait, current);
init_waitqueue_head(&waitq);
- if (copy_from_user(&einfo, argp,
- sizeof(struct erase_info_user))) {
- kfree(erase);
- return -EFAULT;
+ if (cmd == MEMERASE64) {
+ struct erase_info_user64 einfo64;
+
+ if (copy_from_user(&einfo64, argp,
+ sizeof(struct erase_info_user64))) {
+ kfree(erase);
+ return -EFAULT;
+ }
+ erase->addr = einfo64.start;
+ erase->len = einfo64.length;
+ } else {
+ struct erase_info_user einfo32;
+
+ if (copy_from_user(&einfo32, argp,
+ sizeof(struct erase_info_user))) {
+ kfree(erase);
+ return -EFAULT;
+ }
+ erase->addr = einfo32.start;
+ erase->len = einfo32.length;
}
- erase->addr = einfo.start;
- erase->len = einfo.length;
erase->mtd = mtd;
erase->callback = mtdchar_erase_callback;
erase->priv = (unsigned long)&waitq;
@@ -474,100 +582,56 @@ static int mtd_ioctl(struct inode *inode, struct file *file,
case MEMWRITEOOB:
{
struct mtd_oob_buf buf;
- struct mtd_oob_ops ops;
- struct mtd_oob_buf __user *user_buf = argp;
- uint32_t retlen;
-
- if(!(file->f_mode & FMODE_WRITE))
- return -EPERM;
-
- if (copy_from_user(&buf, argp, sizeof(struct mtd_oob_buf)))
- return -EFAULT;
-
- if (buf.length > 4096)
- return -EINVAL;
-
- if (!mtd->write_oob)
- ret = -EOPNOTSUPP;
- else
- ret = access_ok(VERIFY_READ, buf.ptr,
- buf.length) ? 0 : EFAULT;
-
- if (ret)
- return ret;
-
- ops.ooblen = buf.length;
- ops.ooboffs = buf.start & (mtd->oobsize - 1);
- ops.datbuf = NULL;
- ops.mode = MTD_OOB_PLACE;
-
- if (ops.ooboffs && ops.ooblen > (mtd->oobsize - ops.ooboffs))
- return -EINVAL;
-
- ops.oobbuf = kmalloc(buf.length, GFP_KERNEL);
- if (!ops.oobbuf)
- return -ENOMEM;
-
- if (copy_from_user(ops.oobbuf, buf.ptr, buf.length)) {
- kfree(ops.oobbuf);
- return -EFAULT;
- }
+ struct mtd_oob_buf __user *buf_user = argp;
- buf.start &= ~(mtd->oobsize - 1);
- ret = mtd->write_oob(mtd, buf.start, &ops);
-
- if (ops.oobretlen > 0xFFFFFFFFU)
- ret = -EOVERFLOW;
- retlen = ops.oobretlen;
- if (copy_to_user(&user_buf->length, &retlen, sizeof(buf.length)))
+ /* NOTE: writes return length to buf_user->length */
+ if (copy_from_user(&buf, argp, sizeof(buf)))
ret = -EFAULT;
-
- kfree(ops.oobbuf);
+ else
+ ret = mtd_do_writeoob(file, mtd, buf.start, buf.length,
+ buf.ptr, &buf_user->length);
break;
-
}
case MEMREADOOB:
{
struct mtd_oob_buf buf;
- struct mtd_oob_ops ops;
-
- if (copy_from_user(&buf, argp, sizeof(struct mtd_oob_buf)))
- return -EFAULT;
-
- if (buf.length > 4096)
- return -EINVAL;
+ struct mtd_oob_buf __user *buf_user = argp;
- if (!mtd->read_oob)
- ret = -EOPNOTSUPP;
+ /* NOTE: writes return length to buf_user->start */
+ if (copy_from_user(&buf, argp, sizeof(buf)))
+ ret = -EFAULT;
else
- ret = access_ok(VERIFY_WRITE, buf.ptr,
- buf.length) ? 0 : -EFAULT;
- if (ret)
- return ret;
-
- ops.ooblen = buf.length;
- ops.ooboffs = buf.start & (mtd->oobsize - 1);
- ops.datbuf = NULL;
- ops.mode = MTD_OOB_PLACE;
+ ret = mtd_do_readoob(mtd, buf.start, buf.length,
+ buf.ptr, &buf_user->start);
+ break;
+ }
- if (ops.ooboffs && ops.ooblen > (mtd->oobsize - ops.ooboffs))
- return -EINVAL;
+ case MEMWRITEOOB64:
+ {
+ struct mtd_oob_buf64 buf;
+ struct mtd_oob_buf64 __user *buf_user = argp;
- ops.oobbuf = kmalloc(buf.length, GFP_KERNEL);
- if (!ops.oobbuf)
- return -ENOMEM;
+ if (copy_from_user(&buf, argp, sizeof(buf)))
+ ret = -EFAULT;
+ else
+ ret = mtd_do_writeoob(file, mtd, buf.start, buf.length,
+ (void __user *)(uintptr_t)buf.usr_ptr,
+ &buf_user->length);
+ break;
+ }
- buf.start &= ~(mtd->oobsize - 1);
- ret = mtd->read_oob(mtd, buf.start, &ops);
+ case MEMREADOOB64:
+ {
+ struct mtd_oob_buf64 buf;
+ struct mtd_oob_buf64 __user *buf_user = argp;
- if (put_user(ops.oobretlen, (uint32_t __user *)argp))
- ret = -EFAULT;
- else if (ops.oobretlen && copy_to_user(buf.ptr, ops.oobbuf,
- ops.oobretlen))
+ if (copy_from_user(&buf, argp, sizeof(buf)))
ret = -EFAULT;
-
- kfree(ops.oobbuf);
+ else
+ ret = mtd_do_readoob(mtd, buf.start, buf.length,
+ (void __user *)(uintptr_t)buf.usr_ptr,
+ &buf_user->length);
break;
}
@@ -758,6 +822,68 @@ static int mtd_ioctl(struct inode *inode, struct file *file,
return ret;
} /* memory_ioctl */
+#ifdef CONFIG_COMPAT
+
+struct mtd_oob_buf32 {
+ u_int32_t start;
+ u_int32_t length;
+ compat_caddr_t ptr; /* unsigned char* */
+};
+
+#define MEMWRITEOOB32 _IOWR('M', 3, struct mtd_oob_buf32)
+#define MEMREADOOB32 _IOWR('M', 4, struct mtd_oob_buf32)
+
+static long mtd_compat_ioctl(struct file *file, unsigned int cmd,
+ unsigned long arg)
+{
+ struct inode *inode = file->f_path.dentry->d_inode;
+ struct mtd_file_info *mfi = file->private_data;
+ struct mtd_info *mtd = mfi->mtd;
+ void __user *argp = compat_ptr(arg);
+ int ret = 0;
+
+ lock_kernel();
+
+ switch (cmd) {
+ case MEMWRITEOOB32:
+ {
+ struct mtd_oob_buf32 buf;
+ struct mtd_oob_buf32 __user *buf_user = argp;
+
+ if (copy_from_user(&buf, argp, sizeof(buf)))
+ ret = -EFAULT;
+ else
+ ret = mtd_do_writeoob(file, mtd, buf.start,
+ buf.length, compat_ptr(buf.ptr),
+ &buf_user->length);
+ break;
+ }
+
+ case MEMREADOOB32:
+ {
+ struct mtd_oob_buf32 buf;
+ struct mtd_oob_buf32 __user *buf_user = argp;
+
+ /* NOTE: writes return length to buf->start */
+ if (copy_from_user(&buf, argp, sizeof(buf)))
+ ret = -EFAULT;
+ else
+ ret = mtd_do_readoob(mtd, buf.start,
+ buf.length, compat_ptr(buf.ptr),
+ &buf_user->start);
+ break;
+ }
+ default:
+ ret = mtd_ioctl(inode, file, cmd, (unsigned long)argp);
+ }
+
+ unlock_kernel();
+
+ return ret;
+}
+
+#endif /* CONFIG_COMPAT */
+
/*
* try to determine where a shared mapping can be made
* - only supported for NOMMU at the moment (MMU can't doesn't copy private
@@ -817,6 +943,9 @@ static const struct file_operations mtd_fops = {
.read = mtd_read,
.write = mtd_write,
.ioctl = mtd_ioctl,
+#ifdef CONFIG_COMPAT
+ .compat_ioctl = mtd_compat_ioctl,
+#endif
.open = mtd_open,
.release = mtd_close,
.mmap = mtd_mmap,
diff --git a/drivers/mtd/mtdcore.c b/drivers/mtd/mtdcore.c
index bccb4b1ffc4..00ebf7af746 100644
--- a/drivers/mtd/mtdcore.c
+++ b/drivers/mtd/mtdcore.c
@@ -23,8 +23,15 @@
#include "mtdcore.h"
-
-static struct class *mtd_class;
+static int mtd_cls_suspend(struct device *dev, pm_message_t state);
+static int mtd_cls_resume(struct device *dev);
+
+static struct class mtd_class = {
+ .name = "mtd",
+ .owner = THIS_MODULE,
+ .suspend = mtd_cls_suspend,
+ .resume = mtd_cls_resume,
+};
/* These are exported solely for the purpose of mtd_blkdevs.c. You
should not use them for _anything_ else */
@@ -52,7 +59,26 @@ static void mtd_release(struct device *dev)
/* remove /dev/mtdXro node if needed */
if (index)
- device_destroy(mtd_class, index + 1);
+ device_destroy(&mtd_class, index + 1);
+}
+
+static int mtd_cls_suspend(struct device *dev, pm_message_t state)
+{
+ struct mtd_info *mtd = dev_to_mtd(dev);
+
+ if (mtd && mtd->suspend)
+ return mtd->suspend(mtd);
+ else
+ return 0;
+}
+
+static int mtd_cls_resume(struct device *dev)
+{
+ struct mtd_info *mtd = dev_to_mtd(dev);
+
+ if (mtd && mtd->resume)
+ mtd->resume(mtd);
+ return 0;
}
static ssize_t mtd_type_show(struct device *dev,
@@ -269,16 +295,17 @@ int add_mtd_device(struct mtd_info *mtd)
* physical device.
*/
mtd->dev.type = &mtd_devtype;
- mtd->dev.class = mtd_class;
+ mtd->dev.class = &mtd_class;
mtd->dev.devt = MTD_DEVT(i);
dev_set_name(&mtd->dev, "mtd%d", i);
+ dev_set_drvdata(&mtd->dev, mtd);
if (device_register(&mtd->dev) != 0) {
mtd_table[i] = NULL;
break;
}
if (MTD_DEVT(i))
- device_create(mtd_class, mtd->dev.parent,
+ device_create(&mtd_class, mtd->dev.parent,
MTD_DEVT(i) + 1,
NULL, "mtd%dro", i);
@@ -604,11 +631,12 @@ done:
static int __init init_mtd(void)
{
- mtd_class = class_create(THIS_MODULE, "mtd");
+ int ret;
+ ret = class_register(&mtd_class);
- if (IS_ERR(mtd_class)) {
- pr_err("Error creating mtd class.\n");
- return PTR_ERR(mtd_class);
+ if (ret) {
+ pr_err("Error registering mtd class: %d\n", ret);
+ return ret;
}
#ifdef CONFIG_PROC_FS
if ((proc_mtd = create_proc_entry( "mtd", 0, NULL )))
@@ -623,7 +651,7 @@ static void __exit cleanup_mtd(void)
if (proc_mtd)
remove_proc_entry( "mtd", NULL);
#endif /* CONFIG_PROC_FS */
- class_destroy(mtd_class);
+ class_unregister(&mtd_class);
}
module_init(init_mtd);
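
The mtdcore change switches from a dynamically created class to a statically declared one so that class-wide suspend/resume hooks can be wired in. Below is a minimal sketch of the same pattern for a hypothetical "foo" class, assuming a kernel of this era where struct class still carries suspend/resume pointers; all names are illustrative:

#include <linux/device.h>
#include <linux/init.h>
#include <linux/module.h>

static int foo_cls_suspend(struct device *dev, pm_message_t state)
{
	/* dispatch to per-device state saved with dev_set_drvdata() */
	return 0;
}

static int foo_cls_resume(struct device *dev)
{
	return 0;
}

static struct class foo_class = {
	.name    = "foo",
	.owner   = THIS_MODULE,
	.suspend = foo_cls_suspend,
	.resume  = foo_cls_resume,
};

static int __init foo_init(void)
{
	return class_register(&foo_class);	/* instead of class_create() */
}

static void __exit foo_exit(void)
{
	class_unregister(&foo_class);
}

module_init(foo_init);
module_exit(foo_exit);
MODULE_LICENSE("GPL");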
diff --git a/drivers/mtd/mtdpart.c b/drivers/mtd/mtdpart.c
index 29675edb44b..349fcbe5cc0 100644
--- a/drivers/mtd/mtdpart.c
+++ b/drivers/mtd/mtdpart.c
@@ -27,9 +27,7 @@ struct mtd_part {
struct mtd_info mtd;
struct mtd_info *master;
uint64_t offset;
- int index;
struct list_head list;
- int registered;
};
/*
@@ -321,8 +319,7 @@ int del_mtd_partitions(struct mtd_info *master)
list_for_each_entry_safe(slave, next, &mtd_partitions, list)
if (slave->master == master) {
list_del(&slave->list);
- if (slave->registered)
- del_mtd_device(&slave->mtd);
+ del_mtd_device(&slave->mtd);
kfree(slave);
}
@@ -395,7 +392,7 @@ static struct mtd_part *add_one_partition(struct mtd_info *master,
slave->mtd.get_fact_prot_info = part_get_fact_prot_info;
if (master->sync)
slave->mtd.sync = part_sync;
- if (!partno && master->suspend && master->resume) {
+ if (!partno && !master->dev.class && master->suspend && master->resume) {
slave->mtd.suspend = part_suspend;
slave->mtd.resume = part_resume;
}
@@ -412,7 +409,6 @@ static struct mtd_part *add_one_partition(struct mtd_info *master,
slave->mtd.erase = part_erase;
slave->master = master;
slave->offset = part->offset;
- slave->index = partno;
if (slave->offset == MTDPART_OFS_APPEND)
slave->offset = cur_offset;
@@ -500,15 +496,9 @@ static struct mtd_part *add_one_partition(struct mtd_info *master,
}
out_register:
- if (part->mtdp) {
- /* store the object pointer (caller may or may not register it*/
- *part->mtdp = &slave->mtd;
- slave->registered = 0;
- } else {
- /* register our partition */
- add_mtd_device(&slave->mtd);
- slave->registered = 1;
- }
+ /* register our partition */
+ add_mtd_device(&slave->mtd);
+
return slave;
}
diff --git a/drivers/mtd/nand/Kconfig b/drivers/mtd/nand/Kconfig
index f3276897859..ce96c091f01 100644
--- a/drivers/mtd/nand/Kconfig
+++ b/drivers/mtd/nand/Kconfig
@@ -74,6 +74,12 @@ config MTD_NAND_AMS_DELTA
help
Support for NAND flash on Amstrad E3 (Delta).
+config MTD_NAND_OMAP2
+ tristate "NAND Flash device on OMAP2 and OMAP3"
+ depends on ARM && MTD_NAND && (ARCH_OMAP2 || ARCH_OMAP3)
+ help
+ Support for NAND flash on Texas Instruments OMAP2 and OMAP3 platforms.
+
config MTD_NAND_TS7250
tristate "NAND Flash device on TS-7250 board"
depends on MACH_TS72XX
@@ -139,27 +145,27 @@ config MTD_NAND_PPCHAMELEONEVB
This enables the NAND flash driver on the PPChameleon EVB Board.
config MTD_NAND_S3C2410
- tristate "NAND Flash support for S3C2410/S3C2440 SoC"
- depends on ARCH_S3C2410
+ tristate "NAND Flash support for Samsung S3C SoCs"
+ depends on ARCH_S3C2410 || ARCH_S3C64XX
help
- This enables the NAND flash controller on the S3C2410 and S3C2440
+ This enables the NAND flash controller on the S3C24xx and S3C64xx
SoCs
No board specific support is done by this driver, each board
must advertise a platform_device for the driver to attach.
config MTD_NAND_S3C2410_DEBUG
- bool "S3C2410 NAND driver debug"
+ bool "Samsung S3C NAND driver debug"
depends on MTD_NAND_S3C2410
help
- Enable debugging of the S3C2410 NAND driver
+ Enable debugging of the S3C NAND driver
config MTD_NAND_S3C2410_HWECC
- bool "S3C2410 NAND Hardware ECC"
+ bool "Samsung S3C NAND Hardware ECC"
depends on MTD_NAND_S3C2410
help
- Enable the use of the S3C2410's internal ECC generator when
- using NAND. Early versions of the chip have had problems with
+ Enable the use of the controller's internal ECC generator when
+ using NAND. Early versions of the chips have had problems with
incorrect ECC generation, and if using these, the default of
software ECC is preferable.
@@ -171,7 +177,7 @@ config MTD_NAND_NDFC
NDFC Nand Flash Controllers are integrated in IBM/AMCC's 4xx SoCs
config MTD_NAND_S3C2410_CLKSTOP
- bool "S3C2410 NAND IDLE clock stop"
+ bool "Samsung S3C NAND IDLE clock stop"
depends on MTD_NAND_S3C2410
default n
help
diff --git a/drivers/mtd/nand/Makefile b/drivers/mtd/nand/Makefile
index d33860ac42c..f3a786b3cff 100644
--- a/drivers/mtd/nand/Makefile
+++ b/drivers/mtd/nand/Makefile
@@ -25,6 +25,7 @@ obj-$(CONFIG_MTD_NAND_CS553X) += cs553x_nand.o
obj-$(CONFIG_MTD_NAND_NDFC) += ndfc.o
obj-$(CONFIG_MTD_NAND_ATMEL) += atmel_nand.o
obj-$(CONFIG_MTD_NAND_GPIO) += gpio.o
+obj-$(CONFIG_MTD_NAND_OMAP2) += omap2.o
obj-$(CONFIG_MTD_NAND_CM_X270) += cmx270_nand.o
obj-$(CONFIG_MTD_NAND_BASLER_EXCITE) += excite_nandflash.o
obj-$(CONFIG_MTD_NAND_PXA3xx) += pxa3xx_nand.o
diff --git a/drivers/mtd/nand/atmel_nand.c b/drivers/mtd/nand/atmel_nand.c
index 47a33cec379..20c828ba940 100644
--- a/drivers/mtd/nand/atmel_nand.c
+++ b/drivers/mtd/nand/atmel_nand.c
@@ -24,6 +24,7 @@
#include <linux/slab.h>
#include <linux/module.h>
+#include <linux/moduleparam.h>
#include <linux/platform_device.h>
#include <linux/mtd/mtd.h>
#include <linux/mtd/nand.h>
@@ -47,6 +48,9 @@
#define no_ecc 0
#endif
+static int on_flash_bbt = 0;
+module_param(on_flash_bbt, int, 0);
+
/* Register access macros */
#define ecc_readl(add, reg) \
__raw_readl(add + ATMEL_ECC_##reg)
@@ -459,12 +463,17 @@ static int __init atmel_nand_probe(struct platform_device *pdev)
if (host->board->det_pin) {
if (gpio_get_value(host->board->det_pin)) {
- printk("No SmartMedia card inserted.\n");
+ printk(KERN_INFO "No SmartMedia card inserted.\n");
res = ENXIO;
goto err_no_card;
}
}
+ if (on_flash_bbt) {
+ printk(KERN_INFO "atmel_nand: Use On Flash BBT\n");
+ nand_chip->options |= NAND_USE_FLASH_BBT;
+ }
+
/* first scan to find the device and get the page size */
if (nand_scan_ident(mtd, 1)) {
res = -ENXIO;
@@ -525,7 +534,7 @@ static int __init atmel_nand_probe(struct platform_device *pdev)
&num_partitions);
if ((!partitions) || (num_partitions == 0)) {
- printk(KERN_ERR "atmel_nand: No parititions defined, or unsupported device.\n");
+ printk(KERN_ERR "atmel_nand: No partitions defined, or unsupported device.\n");
res = ENXIO;
goto err_no_partitions;
}
diff --git a/drivers/mtd/nand/bf5xx_nand.c b/drivers/mtd/nand/bf5xx_nand.c
index 4c2a67ca801..8506e7e606f 100644
--- a/drivers/mtd/nand/bf5xx_nand.c
+++ b/drivers/mtd/nand/bf5xx_nand.c
@@ -458,7 +458,7 @@ static irqreturn_t bf5xx_nand_dma_irq(int irq, void *dev_id)
return IRQ_HANDLED;
}
-static int bf5xx_nand_dma_rw(struct mtd_info *mtd,
+static void bf5xx_nand_dma_rw(struct mtd_info *mtd,
uint8_t *buf, int is_read)
{
struct bf5xx_nand_info *info = mtd_to_nand_info(mtd);
@@ -496,11 +496,20 @@ static int bf5xx_nand_dma_rw(struct mtd_info *mtd,
/* setup DMA register with Blackfin DMA API */
set_dma_config(CH_NFC, 0x0);
set_dma_start_addr(CH_NFC, (unsigned long) buf);
+
+/* The DMAs have different size on BF52x and BF54x */
+#ifdef CONFIG_BF52x
+ set_dma_x_count(CH_NFC, (page_size >> 1));
+ set_dma_x_modify(CH_NFC, 2);
+ val = DI_EN | WDSIZE_16;
+#endif
+
+#ifdef CONFIG_BF54x
set_dma_x_count(CH_NFC, (page_size >> 2));
set_dma_x_modify(CH_NFC, 4);
-
- /* setup write or read operation */
val = DI_EN | WDSIZE_32;
+#endif
+ /* setup write or read operation */
if (is_read)
val |= WNR;
set_dma_config(CH_NFC, val);
@@ -512,8 +521,6 @@ static int bf5xx_nand_dma_rw(struct mtd_info *mtd,
else
bfin_write_NFC_PGCTL(0x2);
wait_for_completion(&info->dma_completion);
-
- return 0;
}
static void bf5xx_nand_dma_read_buf(struct mtd_info *mtd,
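
The BF52x/BF54x split above only changes the DMA element width; both configurations still transfer exactly one page per DMA run. A small host-side sketch (not from the patch) of the invariant the two #ifdef branches rely on:

#include <assert.h>

static void check_nfc_dma_geometry(unsigned int page_size)
{
	unsigned int count_bf52x = page_size >> 1;	/* 16-bit DMA elements */
	unsigned int count_bf54x = page_size >> 2;	/* 32-bit DMA elements */

	/* x_count * x_modify must cover exactly one page in both cases */
	assert(count_bf52x * 2 == page_size);
	assert(count_bf54x * 4 == page_size);
}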
diff --git a/drivers/mtd/nand/davinci_nand.c b/drivers/mtd/nand/davinci_nand.c
index 02700f769b8..0fad6487e6f 100644
--- a/drivers/mtd/nand/davinci_nand.c
+++ b/drivers/mtd/nand/davinci_nand.c
@@ -44,7 +44,7 @@
* and some flavors of secondary chipselect (e.g. based on A12) as used
* with multichip packages.
*
- * The 1-bit ECC hardware is supported, but not yet the newer 4-bit ECC
+ * The 1-bit ECC hardware is supported, as well as the newer 4-bit ECC
* available on chips like the DM355 and OMAP-L137 and needed with the
* more error-prone MLC NAND chips.
*
@@ -54,11 +54,14 @@
struct davinci_nand_info {
struct mtd_info mtd;
struct nand_chip chip;
+ struct nand_ecclayout ecclayout;
struct device *dev;
struct clk *clk;
bool partitioned;
+ bool is_readmode;
+
void __iomem *base;
void __iomem *vaddr;
@@ -73,6 +76,7 @@ struct davinci_nand_info {
};
static DEFINE_SPINLOCK(davinci_nand_lock);
+static bool ecc4_busy;
#define to_davinci_nand(m) container_of(m, struct davinci_nand_info, mtd)
@@ -218,6 +222,192 @@ static int nand_davinci_correct_1bit(struct mtd_info *mtd, u_char *dat,
/*----------------------------------------------------------------------*/
/*
+ * 4-bit hardware ECC ... context maintained over entire AEMIF
+ *
+ * This is a syndrome engine, but we avoid NAND_ECC_HW_SYNDROME
+ * since that forces use of a problematic "infix OOB" layout.
+ * Among other things, it trashes manufacturer bad block markers.
+ * Also, and specific to this hardware, it ECC-protects the "prepad"
+ * in the OOB ... while having ECC protection for parts of OOB would
+ * seem useful, the current MTD stack sometimes wants to update the
+ * OOB without recomputing ECC.
+ */
+
+static void nand_davinci_hwctl_4bit(struct mtd_info *mtd, int mode)
+{
+ struct davinci_nand_info *info = to_davinci_nand(mtd);
+ unsigned long flags;
+ u32 val;
+
+ spin_lock_irqsave(&davinci_nand_lock, flags);
+
+ /* Start 4-bit ECC calculation for read/write */
+ val = davinci_nand_readl(info, NANDFCR_OFFSET);
+ val &= ~(0x03 << 4);
+ val |= (info->core_chipsel << 4) | BIT(12);
+ davinci_nand_writel(info, NANDFCR_OFFSET, val);
+
+ info->is_readmode = (mode == NAND_ECC_READ);
+
+ spin_unlock_irqrestore(&davinci_nand_lock, flags);
+}
+
+/* Read raw ECC code after writing to NAND. */
+static void
+nand_davinci_readecc_4bit(struct davinci_nand_info *info, u32 code[4])
+{
+ const u32 mask = 0x03ff03ff;
+
+ code[0] = davinci_nand_readl(info, NAND_4BIT_ECC1_OFFSET) & mask;
+ code[1] = davinci_nand_readl(info, NAND_4BIT_ECC2_OFFSET) & mask;
+ code[2] = davinci_nand_readl(info, NAND_4BIT_ECC3_OFFSET) & mask;
+ code[3] = davinci_nand_readl(info, NAND_4BIT_ECC4_OFFSET) & mask;
+}
+
+/* Terminate read ECC; or return ECC (as bytes) of data written to NAND. */
+static int nand_davinci_calculate_4bit(struct mtd_info *mtd,
+ const u_char *dat, u_char *ecc_code)
+{
+ struct davinci_nand_info *info = to_davinci_nand(mtd);
+ u32 raw_ecc[4], *p;
+ unsigned i;
+
+ /* After a read, terminate ECC calculation by a dummy read
+ * of some 4-bit ECC register. ECC covers everything that
+ * was read; correct() just uses the hardware state, so
+ * ecc_code is not needed.
+ */
+ if (info->is_readmode) {
+ davinci_nand_readl(info, NAND_4BIT_ECC1_OFFSET);
+ return 0;
+ }
+
+ /* Pack eight raw 10-bit ecc values into ten bytes, making
+ * two passes which each convert four values (in upper and
+ * lower halves of two 32-bit words) into five bytes. The
+ * ROM boot loader uses this same packing scheme.
+ */
+ nand_davinci_readecc_4bit(info, raw_ecc);
+ for (i = 0, p = raw_ecc; i < 2; i++, p += 2) {
+ *ecc_code++ = p[0] & 0xff;
+ *ecc_code++ = ((p[0] >> 8) & 0x03) | ((p[0] >> 14) & 0xfc);
+ *ecc_code++ = ((p[0] >> 22) & 0x0f) | ((p[1] << 4) & 0xf0);
+ *ecc_code++ = ((p[1] >> 4) & 0x3f) | ((p[1] >> 10) & 0xc0);
+ *ecc_code++ = (p[1] >> 18) & 0xff;
+ }
+
+ return 0;
+}
+
+/* Correct up to 4 bits in data we just read, using state left in the
+ * hardware plus the ecc_code computed when it was first written.
+ */
+static int nand_davinci_correct_4bit(struct mtd_info *mtd,
+ u_char *data, u_char *ecc_code, u_char *null)
+{
+ int i;
+ struct davinci_nand_info *info = to_davinci_nand(mtd);
+ unsigned short ecc10[8];
+ unsigned short *ecc16;
+ u32 syndrome[4];
+ unsigned num_errors, corrected;
+
+ /* All bytes 0xff? It's an erased page; ignore its ECC. */
+ for (i = 0; i < 10; i++) {
+ if (ecc_code[i] != 0xff)
+ goto compare;
+ }
+ return 0;
+
+compare:
+ /* Unpack ten bytes into eight 10 bit values. We know we're
+ * little-endian, and use type punning for less shifting/masking.
+ */
+ if (WARN_ON(0x01 & (unsigned) ecc_code))
+ return -EINVAL;
+ ecc16 = (unsigned short *)ecc_code;
+
+ ecc10[0] = (ecc16[0] >> 0) & 0x3ff;
+ ecc10[1] = ((ecc16[0] >> 10) & 0x3f) | ((ecc16[1] << 6) & 0x3c0);
+ ecc10[2] = (ecc16[1] >> 4) & 0x3ff;
+ ecc10[3] = ((ecc16[1] >> 14) & 0x3) | ((ecc16[2] << 2) & 0x3fc);
+ ecc10[4] = (ecc16[2] >> 8) | ((ecc16[3] << 8) & 0x300);
+ ecc10[5] = (ecc16[3] >> 2) & 0x3ff;
+ ecc10[6] = ((ecc16[3] >> 12) & 0xf) | ((ecc16[4] << 4) & 0x3f0);
+ ecc10[7] = (ecc16[4] >> 6) & 0x3ff;
+
+ /* Tell ECC controller about the expected ECC codes. */
+ for (i = 7; i >= 0; i--)
+ davinci_nand_writel(info, NAND_4BIT_ECC_LOAD_OFFSET, ecc10[i]);
+
+ /* Allow time for syndrome calculation ... then read it.
+	 * A syndrome of all zeroes means no detected errors.
+ */
+ davinci_nand_readl(info, NANDFSR_OFFSET);
+ nand_davinci_readecc_4bit(info, syndrome);
+ if (!(syndrome[0] | syndrome[1] | syndrome[2] | syndrome[3]))
+ return 0;
+
+ /* Start address calculation, and wait for it to complete.
+ * We _could_ start reading more data while this is working,
+ * to speed up the overall page read.
+ */
+ davinci_nand_writel(info, NANDFCR_OFFSET,
+ davinci_nand_readl(info, NANDFCR_OFFSET) | BIT(13));
+ for (;;) {
+ u32 fsr = davinci_nand_readl(info, NANDFSR_OFFSET);
+
+ switch ((fsr >> 8) & 0x0f) {
+ case 0: /* no error, should not happen */
+ return 0;
+ case 1: /* five or more errors detected */
+ return -EIO;
+ case 2: /* error addresses computed */
+ case 3:
+ num_errors = 1 + ((fsr >> 16) & 0x03);
+ goto correct;
+ default: /* still working on it */
+ cpu_relax();
+ continue;
+ }
+ }
+
+correct:
+ /* correct each error */
+ for (i = 0, corrected = 0; i < num_errors; i++) {
+ int error_address, error_value;
+
+ if (i > 1) {
+ error_address = davinci_nand_readl(info,
+ NAND_ERR_ADD2_OFFSET);
+ error_value = davinci_nand_readl(info,
+ NAND_ERR_ERRVAL2_OFFSET);
+ } else {
+ error_address = davinci_nand_readl(info,
+ NAND_ERR_ADD1_OFFSET);
+ error_value = davinci_nand_readl(info,
+ NAND_ERR_ERRVAL1_OFFSET);
+ }
+
+ if (i & 1) {
+ error_address >>= 16;
+ error_value >>= 16;
+ }
+ error_address &= 0x3ff;
+ error_address = (512 + 7) - error_address;
+
+ if (error_address < 512) {
+ data[error_address] ^= error_value;
+ corrected++;
+ }
+ }
+
+ return corrected;
+}
+
+/*----------------------------------------------------------------------*/
+
+/*
* NOTE: NAND boot requires ALE == EM_A[1], CLE == EM_A[2], so that's
* how these chips are normally wired. This translates to both 8 and 16
* bit busses using ALE == BIT(3) in byte addresses, and CLE == BIT(4).
@@ -294,6 +484,23 @@ static void __init nand_dm6446evm_flash_init(struct davinci_nand_info *info)
/*----------------------------------------------------------------------*/
+/* An ECC layout for using 4-bit ECC with small-page flash, storing
+ * ten ECC bytes plus the manufacturer's bad block marker byte, and
+ * not overlapping the default BBT markers.
+ */
+static struct nand_ecclayout hwecc4_small __initconst = {
+ .eccbytes = 10,
+ .eccpos = { 0, 1, 2, 3, 4,
+ /* offset 5 holds the badblock marker */
+ 6, 7,
+ 13, 14, 15, },
+ .oobfree = {
+ {.offset = 8, .length = 5, },
+ {.offset = 16, },
+ },
+};
+
+
static int __init nand_davinci_probe(struct platform_device *pdev)
{
struct davinci_nand_pdata *pdata = pdev->dev.platform_data;
@@ -306,6 +513,10 @@ static int __init nand_davinci_probe(struct platform_device *pdev)
uint32_t val;
nand_ecc_modes_t ecc_mode;
+ /* insist on board-specific configuration */
+ if (!pdata)
+ return -ENODEV;
+
/* which external chipselect will we be managing? */
if (pdev->id < 0 || pdev->id > 3)
return -ENODEV;
@@ -351,7 +562,7 @@ static int __init nand_davinci_probe(struct platform_device *pdev)
info->chip.select_chip = nand_davinci_select_chip;
/* options such as NAND_USE_FLASH_BBT or 16-bit widths */
- info->chip.options = pdata ? pdata->options : 0;
+ info->chip.options = pdata->options;
info->ioaddr = (uint32_t __force) vaddr;
@@ -360,14 +571,8 @@ static int __init nand_davinci_probe(struct platform_device *pdev)
info->mask_chipsel = pdata->mask_chipsel;
/* use nandboot-capable ALE/CLE masks by default */
- if (pdata && pdata->mask_ale)
- info->mask_ale = pdata->mask_cle;
- else
- info->mask_ale = MASK_ALE;
- if (pdata && pdata->mask_cle)
- info->mask_cle = pdata->mask_cle;
- else
- info->mask_cle = MASK_CLE;
+	info->mask_ale		= pdata->mask_ale ? : MASK_ALE;
+ info->mask_cle = pdata->mask_cle ? : MASK_CLE;
/* Set address of hardware control function */
info->chip.cmd_ctrl = nand_davinci_hwcontrol;
@@ -377,30 +582,44 @@ static int __init nand_davinci_probe(struct platform_device *pdev)
info->chip.read_buf = nand_davinci_read_buf;
info->chip.write_buf = nand_davinci_write_buf;
- /* use board-specific ECC config; else, the best available */
- if (pdata)
- ecc_mode = pdata->ecc_mode;
- else
- ecc_mode = NAND_ECC_HW;
+ /* Use board-specific ECC config */
+ ecc_mode = pdata->ecc_mode;
+ ret = -EINVAL;
switch (ecc_mode) {
case NAND_ECC_NONE:
case NAND_ECC_SOFT:
+ pdata->ecc_bits = 0;
break;
case NAND_ECC_HW:
- info->chip.ecc.calculate = nand_davinci_calculate_1bit;
- info->chip.ecc.correct = nand_davinci_correct_1bit;
- info->chip.ecc.hwctl = nand_davinci_hwctl_1bit;
+ if (pdata->ecc_bits == 4) {
+ /* No sanity checks: CPUs must support this,
+ * and the chips may not use NAND_BUSWIDTH_16.
+ */
+
+ /* No sharing 4-bit hardware between chipselects yet */
+ spin_lock_irq(&davinci_nand_lock);
+ if (ecc4_busy)
+ ret = -EBUSY;
+ else
+ ecc4_busy = true;
+ spin_unlock_irq(&davinci_nand_lock);
+
+ if (ret == -EBUSY)
+ goto err_ecc;
+
+ info->chip.ecc.calculate = nand_davinci_calculate_4bit;
+ info->chip.ecc.correct = nand_davinci_correct_4bit;
+ info->chip.ecc.hwctl = nand_davinci_hwctl_4bit;
+ info->chip.ecc.bytes = 10;
+ } else {
+ info->chip.ecc.calculate = nand_davinci_calculate_1bit;
+ info->chip.ecc.correct = nand_davinci_correct_1bit;
+ info->chip.ecc.hwctl = nand_davinci_hwctl_1bit;
+ info->chip.ecc.bytes = 3;
+ }
info->chip.ecc.size = 512;
- info->chip.ecc.bytes = 3;
break;
- case NAND_ECC_HW_SYNDROME:
- /* FIXME implement */
- info->chip.ecc.size = 512;
- info->chip.ecc.bytes = 10;
-
- dev_warn(&pdev->dev, "4-bit ECC nyet supported\n");
- /* FALL THROUGH */
default:
ret = -EINVAL;
goto err_ecc;
@@ -441,12 +660,56 @@ static int __init nand_davinci_probe(struct platform_device *pdev)
spin_unlock_irq(&davinci_nand_lock);
/* Scan to find existence of the device(s) */
- ret = nand_scan(&info->mtd, pdata->mask_chipsel ? 2 : 1);
+ ret = nand_scan_ident(&info->mtd, pdata->mask_chipsel ? 2 : 1);
if (ret < 0) {
dev_dbg(&pdev->dev, "no NAND chip(s) found\n");
goto err_scan;
}
+ /* Update ECC layout if needed ... for 1-bit HW ECC, the default
+ * is OK, but it allocates 6 bytes when only 3 are needed (for
+ * each 512 bytes). For the 4-bit HW ECC, that default is not
+ * usable: 10 bytes are needed, not 6.
+ */
+ if (pdata->ecc_bits == 4) {
+ int chunks = info->mtd.writesize / 512;
+
+ if (!chunks || info->mtd.oobsize < 16) {
+ dev_dbg(&pdev->dev, "too small\n");
+ ret = -EINVAL;
+ goto err_scan;
+ }
+
+ /* For small page chips, preserve the manufacturer's
+ * badblock marking data ... and make sure a flash BBT
+ * table marker fits in the free bytes.
+ */
+ if (chunks == 1) {
+ info->ecclayout = hwecc4_small;
+ info->ecclayout.oobfree[1].length =
+ info->mtd.oobsize - 16;
+ goto syndrome_done;
+ }
+
+ /* For large page chips we'll be wanting to use a
+ * not-yet-implemented mode that reads OOB data
+ * before reading the body of the page, to avoid
+ * the "infix OOB" model of NAND_ECC_HW_SYNDROME
+ * (and preserve manufacturer badblock markings).
+ */
+ dev_warn(&pdev->dev, "no 4-bit ECC support yet "
+ "for large page NAND\n");
+ ret = -EIO;
+ goto err_scan;
+
+syndrome_done:
+ info->chip.ecc.layout = &info->ecclayout;
+ }
+
+ ret = nand_scan_tail(&info->mtd);
+ if (ret < 0)
+ goto err_scan;
+
if (mtd_has_partitions()) {
struct mtd_partition *mtd_parts = NULL;
int mtd_parts_nb = 0;
@@ -455,22 +718,11 @@ static int __init nand_davinci_probe(struct platform_device *pdev)
static const char *probes[] __initconst =
{ "cmdlinepart", NULL };
- const char *master_name;
-
- /* Set info->mtd.name = 0 temporarily */
- master_name = info->mtd.name;
- info->mtd.name = (char *)0;
-
- /* info->mtd.name == 0, means: don't bother checking
- <mtd-id> */
mtd_parts_nb = parse_mtd_partitions(&info->mtd, probes,
&mtd_parts, 0);
-
- /* Restore info->mtd.name */
- info->mtd.name = master_name;
}
- if (mtd_parts_nb <= 0 && pdata) {
+ if (mtd_parts_nb <= 0) {
mtd_parts = pdata->parts;
mtd_parts_nb = pdata->nr_parts;
}
@@ -483,7 +735,7 @@ static int __init nand_davinci_probe(struct platform_device *pdev)
info->partitioned = true;
}
- } else if (pdata && pdata->nr_parts) {
+ } else if (pdata->nr_parts) {
dev_warn(&pdev->dev, "ignoring %d default partitions on %s\n",
pdata->nr_parts, info->mtd.name);
}
@@ -509,6 +761,11 @@ err_scan:
err_clk_enable:
clk_put(info->clk);
+ spin_lock_irq(&davinci_nand_lock);
+ if (ecc_mode == NAND_ECC_HW_SYNDROME)
+ ecc4_busy = false;
+ spin_unlock_irq(&davinci_nand_lock);
+
err_ecc:
err_clk:
err_ioremap:
@@ -532,6 +789,11 @@ static int __exit nand_davinci_remove(struct platform_device *pdev)
else
status = del_mtd_device(&info->mtd);
+ spin_lock_irq(&davinci_nand_lock);
+ if (info->chip.ecc.mode == NAND_ECC_HW_SYNDROME)
+ ecc4_busy = false;
+ spin_unlock_irq(&davinci_nand_lock);
+
iounmap(info->base);
iounmap(info->vaddr);
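
The 4-bit ECC code above packs eight 10-bit parity values into ten bytes as a plain little-endian bit stream and later unpacks them through a little-endian 16-bit view. A host-side round-trip sketch of that scheme (illustration only, not driver code):

#include <assert.h>
#include <stdint.h>
#include <string.h>

static void pack10(const uint16_t v[8], uint8_t out[10])
{
	int i, b;

	/* v[i] occupies stream bits 10*i .. 10*i+9, least significant first,
	 * which is the byte stream nand_davinci_calculate_4bit() produces */
	memset(out, 0, 10);
	for (i = 0; i < 8; i++)
		for (b = 0; b < 10; b++)
			if (v[i] & (1 << b)) {
				int bit = 10 * i + b;
				out[bit / 8] |= 1 << (bit % 8);
			}
}

static void unpack10(const uint8_t in[10], uint16_t v[8])
{
	uint16_t e[5];
	int i;

	/* same little-endian 16-bit view as the driver's "ecc16" pointer */
	for (i = 0; i < 5; i++)
		e[i] = in[2 * i] | (in[2 * i + 1] << 8);

	v[0] = (e[0] >> 0) & 0x3ff;
	v[1] = ((e[0] >> 10) & 0x3f) | ((e[1] << 6) & 0x3c0);
	v[2] = (e[1] >> 4) & 0x3ff;
	v[3] = ((e[1] >> 14) & 0x3) | ((e[2] << 2) & 0x3fc);
	v[4] = (e[2] >> 8) | ((e[3] << 8) & 0x300);
	v[5] = (e[3] >> 2) & 0x3ff;
	v[6] = ((e[3] >> 12) & 0xf) | ((e[4] << 4) & 0x3f0);
	v[7] = (e[4] >> 6) & 0x3ff;
}

int main(void)
{
	uint16_t in[8] = { 0x001, 0x3ff, 0x155, 0x2aa, 0x0f0, 0x30c, 0x123, 0x321 };
	uint16_t back[8];
	uint8_t bytes[10];

	pack10(in, bytes);
	unpack10(bytes, back);
	assert(memcmp(in, back, sizeof(in)) == 0);
	return 0;
}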
diff --git a/drivers/mtd/nand/mxc_nand.c b/drivers/mtd/nand/mxc_nand.c
index 40c26080ecd..76beea40d2c 100644
--- a/drivers/mtd/nand/mxc_nand.c
+++ b/drivers/mtd/nand/mxc_nand.c
@@ -138,7 +138,14 @@ static struct nand_ecclayout nand_hw_eccoob_8 = {
static struct nand_ecclayout nand_hw_eccoob_16 = {
.eccbytes = 5,
.eccpos = {6, 7, 8, 9, 10},
- .oobfree = {{0, 6}, {12, 4}, }
+ .oobfree = {{0, 5}, {11, 5}, }
+};
+
+static struct nand_ecclayout nand_hw_eccoob_64 = {
+ .eccbytes = 20,
+ .eccpos = {6, 7, 8, 9, 10, 22, 23, 24, 25, 26,
+ 38, 39, 40, 41, 42, 54, 55, 56, 57, 58},
+ .oobfree = {{2, 4}, {11, 10}, {27, 10}, {43, 10}, {59, 5}, }
};
#ifdef CONFIG_MTD_PARTITIONS
@@ -192,7 +199,7 @@ static void wait_op_done(struct mxc_nand_host *host, int max_retries,
}
udelay(1);
}
- if (max_retries <= 0)
+ if (max_retries < 0)
DEBUG(MTD_DEBUG_LEVEL0, "%s(%d): INT not set\n",
__func__, param);
}
@@ -795,9 +802,13 @@ static void mxc_nand_command(struct mtd_info *mtd, unsigned command,
send_addr(host, (page_addr & 0xff), false);
if (host->pagesize_2k) {
- send_addr(host, (page_addr >> 8) & 0xFF, false);
- if (mtd->size >= 0x40000000)
+ if (mtd->size >= 0x10000000) {
+ /* paddr_8 - paddr_15 */
+ send_addr(host, (page_addr >> 8) & 0xff, false);
send_addr(host, (page_addr >> 16) & 0xff, true);
+ } else
+ /* paddr_8 - paddr_15 */
+ send_addr(host, (page_addr >> 8) & 0xff, true);
} else {
/* One more address cycle for higher density devices */
if (mtd->size >= 0x4000000) {
@@ -923,7 +934,6 @@ static int __init mxcnd_probe(struct platform_device *pdev)
this->ecc.mode = NAND_ECC_HW;
this->ecc.size = 512;
this->ecc.bytes = 3;
- this->ecc.layout = &nand_hw_eccoob_8;
tmp = readw(host->regs + NFC_CONFIG1);
tmp |= NFC_ECC_EN;
writew(tmp, host->regs + NFC_CONFIG1);
@@ -957,12 +967,44 @@ static int __init mxcnd_probe(struct platform_device *pdev)
this->ecc.layout = &nand_hw_eccoob_16;
}
- host->pagesize_2k = 0;
+ /* first scan to find the device and get the page size */
+ if (nand_scan_ident(mtd, 1)) {
+ err = -ENXIO;
+ goto escan;
+ }
- /* Scan to find existence of the device */
- if (nand_scan(mtd, 1)) {
- DEBUG(MTD_DEBUG_LEVEL0,
- "MXC_ND: Unable to find any NAND device.\n");
+ host->pagesize_2k = (mtd->writesize == 2048) ? 1 : 0;
+
+ if (this->ecc.mode == NAND_ECC_HW) {
+ switch (mtd->oobsize) {
+ case 8:
+ this->ecc.layout = &nand_hw_eccoob_8;
+ break;
+ case 16:
+ this->ecc.layout = &nand_hw_eccoob_16;
+ break;
+ case 64:
+ this->ecc.layout = &nand_hw_eccoob_64;
+ break;
+ default:
+ /* page size not handled by HW ECC */
+ /* switching back to soft ECC */
+ this->ecc.size = 512;
+ this->ecc.bytes = 3;
+ this->ecc.layout = &nand_hw_eccoob_8;
+ this->ecc.mode = NAND_ECC_SOFT;
+ this->ecc.calculate = NULL;
+ this->ecc.correct = NULL;
+ this->ecc.hwctl = NULL;
+ tmp = readw(host->regs + NFC_CONFIG1);
+ tmp &= ~NFC_ECC_EN;
+ writew(tmp, host->regs + NFC_CONFIG1);
+ break;
+ }
+ }
+
+ /* second phase scan */
+ if (nand_scan_tail(mtd)) {
err = -ENXIO;
goto escan;
}
@@ -985,7 +1027,7 @@ static int __init mxcnd_probe(struct platform_device *pdev)
return 0;
escan:
- free_irq(host->irq, NULL);
+ free_irq(host->irq, host);
eirq:
iounmap(host->regs);
eres:
@@ -1005,7 +1047,7 @@ static int __devexit mxcnd_remove(struct platform_device *pdev)
platform_set_drvdata(pdev, NULL);
nand_release(&host->mtd);
- free_irq(host->irq, NULL);
+ free_irq(host->irq, host);
iounmap(host->regs);
kfree(host);
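
The new 2 KiB-page address handling switches on mtd->size >= 0x10000000 because a 256 MiB chip with 2 KiB pages holds 2^17 pages, which no longer fits in two 8-bit row-address cycles. A small sketch (not from the patch) of that arithmetic:

#include <stdint.h>

static unsigned int row_cycles(uint64_t chip_size, unsigned int page_size)
{
	uint64_t pages = chip_size / page_size;
	unsigned int bits = 0;

	while (((uint64_t)1 << bits) < pages)
		bits++;

	return (bits + 7) / 8;	/* 8 page-address bits per address cycle */
}

/* 256 MiB / 2 KiB = 2^17 pages -> 17 bits -> 3 cycles; anything smaller
 * with 2 KiB pages needs at most 16 bits -> 2 cycles. */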
diff --git a/drivers/mtd/nand/nand_base.c b/drivers/mtd/nand/nand_base.c
index 3d7ed432fa4..8c21b89d2d0 100644
--- a/drivers/mtd/nand/nand_base.c
+++ b/drivers/mtd/nand/nand_base.c
@@ -2756,7 +2756,8 @@ int nand_scan_tail(struct mtd_info *mtd)
* the out of band area
*/
chip->ecc.layout->oobavail = 0;
- for (i = 0; chip->ecc.layout->oobfree[i].length; i++)
+ for (i = 0; chip->ecc.layout->oobfree[i].length
+ && i < ARRAY_SIZE(chip->ecc.layout->oobfree); i++)
chip->ecc.layout->oobavail +=
chip->ecc.layout->oobfree[i].length;
mtd->oobavail = chip->ecc.layout->oobavail;
diff --git a/drivers/mtd/nand/nand_ecc.c b/drivers/mtd/nand/nand_ecc.c
index 868147acce2..c0cb87d6d16 100644
--- a/drivers/mtd/nand/nand_ecc.c
+++ b/drivers/mtd/nand/nand_ecc.c
@@ -428,8 +428,8 @@ EXPORT_SYMBOL(nand_calculate_ecc);
int nand_correct_data(struct mtd_info *mtd, unsigned char *buf,
unsigned char *read_ecc, unsigned char *calc_ecc)
{
- unsigned char b0, b1, b2;
- unsigned char byte_addr, bit_addr;
+ unsigned char b0, b1, b2, bit_addr;
+ unsigned int byte_addr;
/* 256 or 512 bytes/ecc */
const uint32_t eccsize_mult =
(((struct nand_chip *)mtd->priv)->ecc.size) >> 8;
diff --git a/drivers/mtd/nand/omap2.c b/drivers/mtd/nand/omap2.c
new file mode 100644
index 00000000000..ebd07e95b81
--- /dev/null
+++ b/drivers/mtd/nand/omap2.c
@@ -0,0 +1,779 @@
+/*
+ * Copyright © 2004 Texas Instruments, Jian Zhang <jzhang@ti.com>
+ * Copyright © 2004 Micron Technology Inc.
+ * Copyright © 2004 David Brownell
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/platform_device.h>
+#include <linux/dma-mapping.h>
+#include <linux/delay.h>
+#include <linux/jiffies.h>
+#include <linux/sched.h>
+#include <linux/mtd/mtd.h>
+#include <linux/mtd/nand.h>
+#include <linux/mtd/partitions.h>
+#include <linux/io.h>
+
+#include <asm/dma.h>
+
+#include <mach/gpmc.h>
+#include <mach/nand.h>
+
+#define GPMC_IRQ_STATUS 0x18
+#define GPMC_ECC_CONFIG 0x1F4
+#define GPMC_ECC_CONTROL 0x1F8
+#define GPMC_ECC_SIZE_CONFIG 0x1FC
+#define GPMC_ECC1_RESULT 0x200
+
+#define DRIVER_NAME "omap2-nand"
+
+/* size (4 KiB) for IO mapping */
+#define NAND_IO_SIZE SZ_4K
+
+#define NAND_WP_OFF 0
+#define NAND_WP_BIT 0x00000010
+#define WR_RD_PIN_MONITORING 0x00600000
+
+#define GPMC_BUF_FULL 0x00000001
+#define GPMC_BUF_EMPTY 0x00000000
+
+#define NAND_Ecc_P1e (1 << 0)
+#define NAND_Ecc_P2e (1 << 1)
+#define NAND_Ecc_P4e (1 << 2)
+#define NAND_Ecc_P8e (1 << 3)
+#define NAND_Ecc_P16e (1 << 4)
+#define NAND_Ecc_P32e (1 << 5)
+#define NAND_Ecc_P64e (1 << 6)
+#define NAND_Ecc_P128e (1 << 7)
+#define NAND_Ecc_P256e (1 << 8)
+#define NAND_Ecc_P512e (1 << 9)
+#define NAND_Ecc_P1024e (1 << 10)
+#define NAND_Ecc_P2048e (1 << 11)
+
+#define NAND_Ecc_P1o (1 << 16)
+#define NAND_Ecc_P2o (1 << 17)
+#define NAND_Ecc_P4o (1 << 18)
+#define NAND_Ecc_P8o (1 << 19)
+#define NAND_Ecc_P16o (1 << 20)
+#define NAND_Ecc_P32o (1 << 21)
+#define NAND_Ecc_P64o (1 << 22)
+#define NAND_Ecc_P128o (1 << 23)
+#define NAND_Ecc_P256o (1 << 24)
+#define NAND_Ecc_P512o (1 << 25)
+#define NAND_Ecc_P1024o (1 << 26)
+#define NAND_Ecc_P2048o (1 << 27)
+
+#define TF(value) (value ? 1 : 0)
+
+#define P2048e(a) (TF(a & NAND_Ecc_P2048e) << 0)
+#define P2048o(a) (TF(a & NAND_Ecc_P2048o) << 1)
+#define P1e(a) (TF(a & NAND_Ecc_P1e) << 2)
+#define P1o(a) (TF(a & NAND_Ecc_P1o) << 3)
+#define P2e(a) (TF(a & NAND_Ecc_P2e) << 4)
+#define P2o(a) (TF(a & NAND_Ecc_P2o) << 5)
+#define P4e(a) (TF(a & NAND_Ecc_P4e) << 6)
+#define P4o(a) (TF(a & NAND_Ecc_P4o) << 7)
+
+#define P8e(a) (TF(a & NAND_Ecc_P8e) << 0)
+#define P8o(a) (TF(a & NAND_Ecc_P8o) << 1)
+#define P16e(a) (TF(a & NAND_Ecc_P16e) << 2)
+#define P16o(a) (TF(a & NAND_Ecc_P16o) << 3)
+#define P32e(a) (TF(a & NAND_Ecc_P32e) << 4)
+#define P32o(a) (TF(a & NAND_Ecc_P32o) << 5)
+#define P64e(a) (TF(a & NAND_Ecc_P64e) << 6)
+#define P64o(a) (TF(a & NAND_Ecc_P64o) << 7)
+
+#define P128e(a) (TF(a & NAND_Ecc_P128e) << 0)
+#define P128o(a) (TF(a & NAND_Ecc_P128o) << 1)
+#define P256e(a) (TF(a & NAND_Ecc_P256e) << 2)
+#define P256o(a) (TF(a & NAND_Ecc_P256o) << 3)
+#define P512e(a) (TF(a & NAND_Ecc_P512e) << 4)
+#define P512o(a) (TF(a & NAND_Ecc_P512o) << 5)
+#define P1024e(a) (TF(a & NAND_Ecc_P1024e) << 6)
+#define P1024o(a) (TF(a & NAND_Ecc_P1024o) << 7)
+
+#define P8e_s(a) (TF(a & NAND_Ecc_P8e) << 0)
+#define P8o_s(a) (TF(a & NAND_Ecc_P8o) << 1)
+#define P16e_s(a) (TF(a & NAND_Ecc_P16e) << 2)
+#define P16o_s(a) (TF(a & NAND_Ecc_P16o) << 3)
+#define P1e_s(a) (TF(a & NAND_Ecc_P1e) << 4)
+#define P1o_s(a) (TF(a & NAND_Ecc_P1o) << 5)
+#define P2e_s(a) (TF(a & NAND_Ecc_P2e) << 6)
+#define P2o_s(a) (TF(a & NAND_Ecc_P2o) << 7)
+
+#define P4e_s(a) (TF(a & NAND_Ecc_P4e) << 0)
+#define P4o_s(a) (TF(a & NAND_Ecc_P4o) << 1)
+
+#ifdef CONFIG_MTD_PARTITIONS
+static const char *part_probes[] = { "cmdlinepart", NULL };
+#endif
+
+struct omap_nand_info {
+ struct nand_hw_control controller;
+ struct omap_nand_platform_data *pdata;
+ struct mtd_info mtd;
+ struct mtd_partition *parts;
+ struct nand_chip nand;
+ struct platform_device *pdev;
+
+ int gpmc_cs;
+ unsigned long phys_base;
+ void __iomem *gpmc_cs_baseaddr;
+ void __iomem *gpmc_baseaddr;
+};
+
+/**
+ * omap_nand_wp - This function enables or disables the write protect feature
+ * @mtd: MTD device structure
+ * @mode: WP ON/OFF
+ */
+static void omap_nand_wp(struct mtd_info *mtd, int mode)
+{
+ struct omap_nand_info *info = container_of(mtd,
+ struct omap_nand_info, mtd);
+
+ unsigned long config = __raw_readl(info->gpmc_baseaddr + GPMC_CONFIG);
+
+ if (mode)
+ config &= ~(NAND_WP_BIT); /* WP is ON */
+ else
+ config |= (NAND_WP_BIT); /* WP is OFF */
+
+ __raw_writel(config, (info->gpmc_baseaddr + GPMC_CONFIG));
+}
+
+/**
+ * omap_hwcontrol - hardware specific access to control-lines
+ * @mtd: MTD device structure
+ * @cmd: command to device
+ * @ctrl:
+ * NAND_NCE: bit 0 -> don't care
+ * NAND_CLE: bit 1 -> Command Latch
+ * NAND_ALE: bit 2 -> Address Latch
+ *
+ * NOTE: boards may use different bits for these!!
+ */
+static void omap_hwcontrol(struct mtd_info *mtd, int cmd, unsigned int ctrl)
+{
+ struct omap_nand_info *info = container_of(mtd,
+ struct omap_nand_info, mtd);
+ switch (ctrl) {
+ case NAND_CTRL_CHANGE | NAND_CTRL_CLE:
+ info->nand.IO_ADDR_W = info->gpmc_cs_baseaddr +
+ GPMC_CS_NAND_COMMAND;
+ info->nand.IO_ADDR_R = info->gpmc_cs_baseaddr +
+ GPMC_CS_NAND_DATA;
+ break;
+
+ case NAND_CTRL_CHANGE | NAND_CTRL_ALE:
+ info->nand.IO_ADDR_W = info->gpmc_cs_baseaddr +
+ GPMC_CS_NAND_ADDRESS;
+ info->nand.IO_ADDR_R = info->gpmc_cs_baseaddr +
+ GPMC_CS_NAND_DATA;
+ break;
+
+ case NAND_CTRL_CHANGE | NAND_NCE:
+ info->nand.IO_ADDR_W = info->gpmc_cs_baseaddr +
+ GPMC_CS_NAND_DATA;
+ info->nand.IO_ADDR_R = info->gpmc_cs_baseaddr +
+ GPMC_CS_NAND_DATA;
+ break;
+ }
+
+ if (cmd != NAND_CMD_NONE)
+ __raw_writeb(cmd, info->nand.IO_ADDR_W);
+}
+
+/**
+ * omap_read_buf16 - read data from NAND controller into buffer
+ * @mtd: MTD device structure
+ * @buf: buffer to store data
+ * @len: number of bytes to read
+ */
+static void omap_read_buf16(struct mtd_info *mtd, u_char *buf, int len)
+{
+ struct nand_chip *nand = mtd->priv;
+
+ __raw_readsw(nand->IO_ADDR_R, buf, len / 2);
+}
+
+/**
+ * omap_write_buf16 - write buffer to NAND controller
+ * @mtd: MTD device structure
+ * @buf: data buffer
+ * @len: number of bytes to write
+ */
+static void omap_write_buf16(struct mtd_info *mtd, const u_char * buf, int len)
+{
+ struct omap_nand_info *info = container_of(mtd,
+ struct omap_nand_info, mtd);
+ u16 *p = (u16 *) buf;
+
+ /* FIXME try bursts of writesw() or DMA ... */
+ len >>= 1;
+
+ while (len--) {
+ writew(*p++, info->nand.IO_ADDR_W);
+
+ while (GPMC_BUF_EMPTY == (readl(info->gpmc_baseaddr +
+ GPMC_STATUS) & GPMC_BUF_FULL))
+ ;
+ }
+}
+/**
+ * omap_verify_buf - Verify chip data against buffer
+ * @mtd: MTD device structure
+ * @buf: buffer containing the data to compare
+ * @len: number of bytes to compare
+ */
+static int omap_verify_buf(struct mtd_info *mtd, const u_char * buf, int len)
+{
+ struct omap_nand_info *info = container_of(mtd, struct omap_nand_info,
+ mtd);
+ u16 *p = (u16 *) buf;
+
+ len >>= 1;
+ while (len--) {
+ if (*p++ != cpu_to_le16(readw(info->nand.IO_ADDR_R)))
+ return -EFAULT;
+ }
+
+ return 0;
+}
+
+#ifdef CONFIG_MTD_NAND_OMAP_HWECC
+/**
+ * omap_hwecc_init - Initialize the HW ECC for NAND flash in GPMC controller
+ * @mtd: MTD device structure
+ */
+static void omap_hwecc_init(struct mtd_info *mtd)
+{
+ struct omap_nand_info *info = container_of(mtd, struct omap_nand_info,
+ mtd);
+ struct nand_chip *chip = mtd->priv;
+ unsigned long val = 0x0;
+
+ /* Read from ECC Control Register */
+ val = __raw_readl(info->gpmc_baseaddr + GPMC_ECC_CONTROL);
+ /* Clear all ECC | Enable Reg1 */
+ val = ((0x00000001<<8) | 0x00000001);
+ __raw_writel(val, info->gpmc_baseaddr + GPMC_ECC_CONTROL);
+
+ /* Read from ECC Size Config Register */
+ val = __raw_readl(info->gpmc_baseaddr + GPMC_ECC_SIZE_CONFIG);
+ /* ECCSIZE1=512 | Select eccResultsize[0-3] */
+ val = ((((chip->ecc.size >> 1) - 1) << 22) | (0x0000000F));
+ __raw_writel(val, info->gpmc_baseaddr + GPMC_ECC_SIZE_CONFIG);
+}
+
+/**
+ * gen_true_ecc - This function will generate true ECC value
+ * @ecc_buf: buffer to store ecc code
+ *
+ * This generated true ECC value can be used when correcting
+ * data read from NAND flash memory core
+ */
+static void gen_true_ecc(u8 *ecc_buf)
+{
+ u32 tmp = ecc_buf[0] | (ecc_buf[1] << 16) |
+ ((ecc_buf[2] & 0xF0) << 20) | ((ecc_buf[2] & 0x0F) << 8);
+
+ ecc_buf[0] = ~(P64o(tmp) | P64e(tmp) | P32o(tmp) | P32e(tmp) |
+ P16o(tmp) | P16e(tmp) | P8o(tmp) | P8e(tmp));
+ ecc_buf[1] = ~(P1024o(tmp) | P1024e(tmp) | P512o(tmp) | P512e(tmp) |
+ P256o(tmp) | P256e(tmp) | P128o(tmp) | P128e(tmp));
+ ecc_buf[2] = ~(P4o(tmp) | P4e(tmp) | P2o(tmp) | P2e(tmp) | P1o(tmp) |
+ P1e(tmp) | P2048o(tmp) | P2048e(tmp));
+}
+
+/**
+ * omap_compare_ecc - Detect (2 bits) and correct (1 bit) error in data
+ * @ecc_data1: ecc code from nand spare area
+ * @ecc_data2: ecc code from hardware register obtained from hardware ecc
+ * @page_data: page data
+ *
+ * This function compares two ECC's and indicates if there is an error.
+ * If the error can be corrected it will be corrected to the buffer.
+ */
+static int omap_compare_ecc(u8 *ecc_data1, /* read from NAND memory */
+ u8 *ecc_data2, /* read from register */
+ u8 *page_data)
+{
+ uint i;
+ u8 tmp0_bit[8], tmp1_bit[8], tmp2_bit[8];
+ u8 comp0_bit[8], comp1_bit[8], comp2_bit[8];
+ u8 ecc_bit[24];
+ u8 ecc_sum = 0;
+ u8 find_bit = 0;
+ uint find_byte = 0;
+ int isEccFF;
+
+ isEccFF = ((*(u32 *)ecc_data1 & 0xFFFFFF) == 0xFFFFFF);
+
+ gen_true_ecc(ecc_data1);
+ gen_true_ecc(ecc_data2);
+
+ for (i = 0; i <= 2; i++) {
+ *(ecc_data1 + i) = ~(*(ecc_data1 + i));
+ *(ecc_data2 + i) = ~(*(ecc_data2 + i));
+ }
+
+ for (i = 0; i < 8; i++) {
+ tmp0_bit[i] = *ecc_data1 % 2;
+ *ecc_data1 = *ecc_data1 / 2;
+ }
+
+ for (i = 0; i < 8; i++) {
+ tmp1_bit[i] = *(ecc_data1 + 1) % 2;
+ *(ecc_data1 + 1) = *(ecc_data1 + 1) / 2;
+ }
+
+ for (i = 0; i < 8; i++) {
+ tmp2_bit[i] = *(ecc_data1 + 2) % 2;
+ *(ecc_data1 + 2) = *(ecc_data1 + 2) / 2;
+ }
+
+ for (i = 0; i < 8; i++) {
+ comp0_bit[i] = *ecc_data2 % 2;
+ *ecc_data2 = *ecc_data2 / 2;
+ }
+
+ for (i = 0; i < 8; i++) {
+ comp1_bit[i] = *(ecc_data2 + 1) % 2;
+ *(ecc_data2 + 1) = *(ecc_data2 + 1) / 2;
+ }
+
+ for (i = 0; i < 8; i++) {
+ comp2_bit[i] = *(ecc_data2 + 2) % 2;
+ *(ecc_data2 + 2) = *(ecc_data2 + 2) / 2;
+ }
+
+ for (i = 0; i < 6; i++)
+ ecc_bit[i] = tmp2_bit[i + 2] ^ comp2_bit[i + 2];
+
+ for (i = 0; i < 8; i++)
+ ecc_bit[i + 6] = tmp0_bit[i] ^ comp0_bit[i];
+
+ for (i = 0; i < 8; i++)
+ ecc_bit[i + 14] = tmp1_bit[i] ^ comp1_bit[i];
+
+ ecc_bit[22] = tmp2_bit[0] ^ comp2_bit[0];
+ ecc_bit[23] = tmp2_bit[1] ^ comp2_bit[1];
+
+ for (i = 0; i < 24; i++)
+ ecc_sum += ecc_bit[i];
+
+ switch (ecc_sum) {
+ case 0:
+ /* Not reached because this function is not called if
+ * ECC values are equal
+ */
+ return 0;
+
+ case 1:
+ /* Uncorrectable error */
+ DEBUG(MTD_DEBUG_LEVEL0, "ECC UNCORRECTED_ERROR 1\n");
+ return -1;
+
+ case 11:
+ /* UN-Correctable error */
+ DEBUG(MTD_DEBUG_LEVEL0, "ECC UNCORRECTED_ERROR B\n");
+ return -1;
+
+ case 12:
+ /* Correctable error */
+ find_byte = (ecc_bit[23] << 8) +
+ (ecc_bit[21] << 7) +
+ (ecc_bit[19] << 6) +
+ (ecc_bit[17] << 5) +
+ (ecc_bit[15] << 4) +
+ (ecc_bit[13] << 3) +
+ (ecc_bit[11] << 2) +
+ (ecc_bit[9] << 1) +
+ ecc_bit[7];
+
+ find_bit = (ecc_bit[5] << 2) + (ecc_bit[3] << 1) + ecc_bit[1];
+
+ DEBUG(MTD_DEBUG_LEVEL0, "Correcting single bit ECC error at "
+ "offset: %d, bit: %d\n", find_byte, find_bit);
+
+ page_data[find_byte] ^= (1 << find_bit);
+
+ return 0;
+ default:
+ if (isEccFF) {
+ if (ecc_data2[0] == 0 &&
+ ecc_data2[1] == 0 &&
+ ecc_data2[2] == 0)
+ return 0;
+ }
+ DEBUG(MTD_DEBUG_LEVEL0, "UNCORRECTED_ERROR default\n");
+ return -1;
+ }
+}
+
+/**
+ * omap_correct_data - Compares the ECC read with HW generated ECC
+ * @mtd: MTD device structure
+ * @dat: page data
+ * @read_ecc: ecc read from nand flash
+ * @calc_ecc: ecc read from HW ECC registers
+ *
+ * Compares the ecc read from nand spare area with ECC registers values
+ * and if the ECCs mismatch, it will call 'omap_compare_ecc' for error detection
+ * and correction.
+ */
+static int omap_correct_data(struct mtd_info *mtd, u_char *dat,
+ u_char *read_ecc, u_char *calc_ecc)
+{
+ struct omap_nand_info *info = container_of(mtd, struct omap_nand_info,
+ mtd);
+ int blockCnt = 0, i = 0, ret = 0;
+
+ /* Ex NAND_ECC_HW12_2048 */
+ if ((info->nand.ecc.mode == NAND_ECC_HW) &&
+ (info->nand.ecc.size == 2048))
+ blockCnt = 4;
+ else
+ blockCnt = 1;
+
+ for (i = 0; i < blockCnt; i++) {
+ if (memcmp(read_ecc, calc_ecc, 3) != 0) {
+ ret = omap_compare_ecc(read_ecc, calc_ecc, dat);
+ if (ret < 0)
+ return ret;
+ }
+ read_ecc += 3;
+ calc_ecc += 3;
+ dat += 512;
+ }
+ return 0;
+}
+
+/**
+ * omap_calculate_ecc - Generate non-inverted ECC bytes.
+ * @mtd: MTD device structure
+ * @dat: The pointer to data on which ecc is computed
+ * @ecc_code: The ecc_code buffer
+ *
+ * Using non-inverted ECC can be considered ugly since writing a blank
+ * page, i.e. padding, will clear the ECC bytes. This is no problem as
+ * long as nobody is trying to write data to the seemingly unused page. Reading
+ * an erased page will produce an ECC mismatch between generated and read
+ * ECC bytes that has to be dealt with separately.
+ */
+static int omap_calculate_ecc(struct mtd_info *mtd, const u_char *dat,
+ u_char *ecc_code)
+{
+ struct omap_nand_info *info = container_of(mtd, struct omap_nand_info,
+ mtd);
+ unsigned long val = 0x0;
+ unsigned long reg;
+
+ /* Start Reading from HW ECC1_Result = 0x200 */
+ reg = (unsigned long)(info->gpmc_baseaddr + GPMC_ECC1_RESULT);
+ val = __raw_readl(reg);
+ *ecc_code++ = val; /* P128e, ..., P1e */
+ *ecc_code++ = val >> 16; /* P128o, ..., P1o */
+ /* P2048o, P1024o, P512o, P256o, P2048e, P1024e, P512e, P256e */
+ *ecc_code++ = ((val >> 8) & 0x0f) | ((val >> 20) & 0xf0);
+ reg += 4;
+
+ return 0;
+}
+
+/**
+ * omap_enable_hwecc - This function enables the hardware ecc functionality
+ * @mtd: MTD device structure
+ * @mode: Read/Write mode
+ */
+static void omap_enable_hwecc(struct mtd_info *mtd, int mode)
+{
+ struct omap_nand_info *info = container_of(mtd, struct omap_nand_info,
+ mtd);
+ struct nand_chip *chip = mtd->priv;
+ unsigned int dev_width = (chip->options & NAND_BUSWIDTH_16) ? 1 : 0;
+ unsigned long val = __raw_readl(info->gpmc_baseaddr + GPMC_ECC_CONFIG);
+
+ switch (mode) {
+ case NAND_ECC_READ:
+ __raw_writel(0x101, info->gpmc_baseaddr + GPMC_ECC_CONTROL);
+ /* (ECC 16 or 8 bit col) | ( CS ) | ECC Enable */
+ val = (dev_width << 7) | (info->gpmc_cs << 1) | (0x1);
+ break;
+ case NAND_ECC_READSYN:
+ __raw_writel(0x100, info->gpmc_baseaddr + GPMC_ECC_CONTROL);
+ /* (ECC 16 or 8 bit col) | ( CS ) | ECC Enable */
+ val = (dev_width << 7) | (info->gpmc_cs << 1) | (0x1);
+ break;
+ case NAND_ECC_WRITE:
+ __raw_writel(0x101, info->gpmc_baseaddr + GPMC_ECC_CONTROL);
+ /* (ECC 16 or 8 bit col) | ( CS ) | ECC Enable */
+ val = (dev_width << 7) | (info->gpmc_cs << 1) | (0x1);
+ break;
+ default:
+ DEBUG(MTD_DEBUG_LEVEL0, "Error: Unrecognized Mode[%d]!\n",
+ mode);
+ break;
+ }
+
+ __raw_writel(val, info->gpmc_baseaddr + GPMC_ECC_CONFIG);
+}
+#endif
+
+/**
+ * omap_wait - wait until the command is done
+ * @mtd: MTD device structure
+ * @chip: NAND Chip structure
+ *
+ * The wait function is called during program and erase operations; given
+ * the way it is called from the MTD layer, we should wait until the NAND
+ * chip is ready after the programming/erase operation has completed.
+ *
+ * Erase can take up to 400ms and program up to 20ms according to
+ * general NAND and SmartMedia specs
+ */
+static int omap_wait(struct mtd_info *mtd, struct nand_chip *chip)
+{
+ struct nand_chip *this = mtd->priv;
+ struct omap_nand_info *info = container_of(mtd, struct omap_nand_info,
+ mtd);
+ unsigned long timeo = jiffies;
+ int status = NAND_STATUS_FAIL, state = this->state;
+
+ if (state == FL_ERASING)
+ timeo += (HZ * 400) / 1000;
+ else
+ timeo += (HZ * 20) / 1000;
+
+ this->IO_ADDR_W = (void *) info->gpmc_cs_baseaddr +
+ GPMC_CS_NAND_COMMAND;
+ this->IO_ADDR_R = (void *) info->gpmc_cs_baseaddr + GPMC_CS_NAND_DATA;
+
+ __raw_writeb(NAND_CMD_STATUS & 0xFF, this->IO_ADDR_W);
+
+ while (time_before(jiffies, timeo)) {
+ status = __raw_readb(this->IO_ADDR_R);
+ if (status & NAND_STATUS_READY)
+ break;
+ cond_resched();
+ }
+ return status;
+}
+
+/**
+ * omap_dev_ready - calls the platform specific dev_ready function
+ * @mtd: MTD device structure
+ */
+static int omap_dev_ready(struct mtd_info *mtd)
+{
+ struct omap_nand_info *info = container_of(mtd, struct omap_nand_info,
+ mtd);
+ unsigned int val = __raw_readl(info->gpmc_baseaddr + GPMC_IRQ_STATUS);
+
+ if ((val & 0x100) == 0x100) {
+ /* Clear IRQ Interrupt */
+ val |= 0x100;
+ val &= ~(0x0);
+ __raw_writel(val, info->gpmc_baseaddr + GPMC_IRQ_STATUS);
+ } else {
+ unsigned int cnt = 0;
+ while (cnt++ < 0x1FF) {
+ if ((val & 0x100) == 0x100)
+ return 0;
+ val = __raw_readl(info->gpmc_baseaddr +
+ GPMC_IRQ_STATUS);
+ }
+ }
+
+ return 1;
+}
+
+static int __devinit omap_nand_probe(struct platform_device *pdev)
+{
+ struct omap_nand_info *info;
+ struct omap_nand_platform_data *pdata;
+ int err;
+ unsigned long val;
+
+
+ pdata = pdev->dev.platform_data;
+ if (pdata == NULL) {
+ dev_err(&pdev->dev, "platform data missing\n");
+ return -ENODEV;
+ }
+
+ info = kzalloc(sizeof(struct omap_nand_info), GFP_KERNEL);
+ if (!info)
+ return -ENOMEM;
+
+ platform_set_drvdata(pdev, info);
+
+ spin_lock_init(&info->controller.lock);
+ init_waitqueue_head(&info->controller.wq);
+
+ info->pdev = pdev;
+
+ info->gpmc_cs = pdata->cs;
+ info->gpmc_baseaddr = pdata->gpmc_baseaddr;
+ info->gpmc_cs_baseaddr = pdata->gpmc_cs_baseaddr;
+
+ info->mtd.priv = &info->nand;
+ info->mtd.name = dev_name(&pdev->dev);
+ info->mtd.owner = THIS_MODULE;
+
+ err = gpmc_cs_request(info->gpmc_cs, NAND_IO_SIZE, &info->phys_base);
+ if (err < 0) {
+ dev_err(&pdev->dev, "Cannot request GPMC CS\n");
+ goto out_free_info;
+ }
+
+ /* Enable RD PIN Monitoring Reg */
+ if (pdata->dev_ready) {
+ val = gpmc_cs_read_reg(info->gpmc_cs, GPMC_CS_CONFIG1);
+ val |= WR_RD_PIN_MONITORING;
+ gpmc_cs_write_reg(info->gpmc_cs, GPMC_CS_CONFIG1, val);
+ }
+
+ val = gpmc_cs_read_reg(info->gpmc_cs, GPMC_CS_CONFIG7);
+ val &= ~(0xf << 8);
+ val |= (0xc & 0xf) << 8;
+ gpmc_cs_write_reg(info->gpmc_cs, GPMC_CS_CONFIG7, val);
+
+ /* NAND write protect off */
+ omap_nand_wp(&info->mtd, NAND_WP_OFF);
+
+ if (!request_mem_region(info->phys_base, NAND_IO_SIZE,
+ pdev->dev.driver->name)) {
+ err = -EBUSY;
+ goto out_free_cs;
+ }
+
+ info->nand.IO_ADDR_R = ioremap(info->phys_base, NAND_IO_SIZE);
+ if (!info->nand.IO_ADDR_R) {
+ err = -ENOMEM;
+ goto out_release_mem_region;
+ }
+ info->nand.controller = &info->controller;
+
+ info->nand.IO_ADDR_W = info->nand.IO_ADDR_R;
+ info->nand.cmd_ctrl = omap_hwcontrol;
+
+ /* REVISIT: only supports 16-bit NAND flash */
+
+ info->nand.read_buf = omap_read_buf16;
+ info->nand.write_buf = omap_write_buf16;
+ info->nand.verify_buf = omap_verify_buf;
+
+ /*
+	 * If the RDY/BSY line is connected to the OMAP then use the omap ready
+	 * function and the generic nand_wait function which reads the status
+	 * register after monitoring the RDY/BSY line. Otherwise use a standard
+	 * chip delay which is slightly more than tR (AC Timing) of the NAND
+	 * device and read the status register until you get a failure or success.
+ */
+ if (pdata->dev_ready) {
+ info->nand.dev_ready = omap_dev_ready;
+ info->nand.chip_delay = 0;
+ } else {
+ info->nand.waitfunc = omap_wait;
+ info->nand.chip_delay = 50;
+ }
+
+ info->nand.options |= NAND_SKIP_BBTSCAN;
+ if ((gpmc_cs_read_reg(info->gpmc_cs, GPMC_CS_CONFIG1) & 0x3000)
+ == 0x1000)
+ info->nand.options |= NAND_BUSWIDTH_16;
+
+#ifdef CONFIG_MTD_NAND_OMAP_HWECC
+ info->nand.ecc.bytes = 3;
+ info->nand.ecc.size = 512;
+ info->nand.ecc.calculate = omap_calculate_ecc;
+ info->nand.ecc.hwctl = omap_enable_hwecc;
+ info->nand.ecc.correct = omap_correct_data;
+ info->nand.ecc.mode = NAND_ECC_HW;
+
+ /* init HW ECC */
+ omap_hwecc_init(&info->mtd);
+#else
+ info->nand.ecc.mode = NAND_ECC_SOFT;
+#endif
+
+ /* DIP switches on some boards change between 8 and 16 bit
+ * bus widths for flash. Try the other width if the first try fails.
+ */
+ if (nand_scan(&info->mtd, 1)) {
+ info->nand.options ^= NAND_BUSWIDTH_16;
+ if (nand_scan(&info->mtd, 1)) {
+ err = -ENXIO;
+ goto out_release_mem_region;
+ }
+ }
+
+#ifdef CONFIG_MTD_PARTITIONS
+ err = parse_mtd_partitions(&info->mtd, part_probes, &info->parts, 0);
+ if (err > 0)
+ add_mtd_partitions(&info->mtd, info->parts, err);
+ else if (pdata->parts)
+ add_mtd_partitions(&info->mtd, pdata->parts, pdata->nr_parts);
+ else
+#endif
+ add_mtd_device(&info->mtd);
+
+ platform_set_drvdata(pdev, &info->mtd);
+
+ return 0;
+
+out_release_mem_region:
+ release_mem_region(info->phys_base, NAND_IO_SIZE);
+out_free_cs:
+ gpmc_cs_free(info->gpmc_cs);
+out_free_info:
+ kfree(info);
+
+ return err;
+}
+
+static int omap_nand_remove(struct platform_device *pdev)
+{
+ struct mtd_info *mtd = platform_get_drvdata(pdev);
+ struct omap_nand_info *info = mtd->priv;
+
+ platform_set_drvdata(pdev, NULL);
+ /* Release NAND device, its internal structures and partitions */
+ nand_release(&info->mtd);
+ iounmap(info->nand.IO_ADDR_R);
+ kfree(&info->mtd);
+ return 0;
+}
+
+static struct platform_driver omap_nand_driver = {
+ .probe = omap_nand_probe,
+ .remove = omap_nand_remove,
+ .driver = {
+ .name = DRIVER_NAME,
+ .owner = THIS_MODULE,
+ },
+};
+
+static int __init omap_nand_init(void)
+{
+ printk(KERN_INFO "%s driver initializing\n", DRIVER_NAME);
+ return platform_driver_register(&omap_nand_driver);
+}
+
+static void __exit omap_nand_exit(void)
+{
+ platform_driver_unregister(&omap_nand_driver);
+}
+
+module_init(omap_nand_init);
+module_exit(omap_nand_exit);
+
+MODULE_ALIAS(DRIVER_NAME);
+MODULE_LICENSE("GPL");
+MODULE_DESCRIPTION("Glue layer for NAND flash on TI OMAP boards");
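
omap_calculate_ecc() above splits the GPMC ECC1_RESULT register into the three ECC bytes stored in the spare area. A standalone sketch of that mapping (bit layout taken from the function's comments; illustration only, not driver code):

#include <stdint.h>

/* Assumed register layout, per omap_calculate_ecc():
 *   bits  0..7   P1e..P128e      bits  8..11  P256e..P2048e
 *   bits 16..23  P1o..P128o      bits 24..27  P256o..P2048o
 */
static void gpmc_ecc1_to_bytes(uint32_t val, uint8_t ecc[3])
{
	ecc[0] = val & 0xff;				/* P128e, ..., P1e */
	ecc[1] = (val >> 16) & 0xff;			/* P128o, ..., P1o */
	ecc[2] = ((val >> 8) & 0x0f) | ((val >> 20) & 0xf0);	/* P2048o..P256e */
}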
diff --git a/drivers/mtd/nand/orion_nand.c b/drivers/mtd/nand/orion_nand.c
index c2dfd3ea353..0d9d4bc9c76 100644
--- a/drivers/mtd/nand/orion_nand.c
+++ b/drivers/mtd/nand/orion_nand.c
@@ -47,6 +47,28 @@ static void orion_nand_cmd_ctrl(struct mtd_info *mtd, int cmd, unsigned int ctrl
writeb(cmd, nc->IO_ADDR_W + offs);
}
+static void orion_nand_read_buf(struct mtd_info *mtd, uint8_t *buf, int len)
+{
+ struct nand_chip *chip = mtd->priv;
+ void __iomem *io_base = chip->IO_ADDR_R;
+ uint64_t *buf64;
+ int i = 0;
+
+ while (len && (unsigned long)buf & 7) {
+ *buf++ = readb(io_base);
+ len--;
+ }
+ buf64 = (uint64_t *)buf;
+ while (i < len/8) {
+ uint64_t x;
+ asm volatile ("ldrd\t%0, [%1]" : "=&r" (x) : "r" (io_base));
+ buf64[i++] = x;
+ }
+ i *= 8;
+ while (i < len)
+ buf[i++] = readb(io_base);
+}
+
static int __init orion_nand_probe(struct platform_device *pdev)
{
struct mtd_info *mtd;
@@ -83,6 +105,7 @@ static int __init orion_nand_probe(struct platform_device *pdev)
nc->priv = board;
nc->IO_ADDR_R = nc->IO_ADDR_W = io_base;
nc->cmd_ctrl = orion_nand_cmd_ctrl;
+ nc->read_buf = orion_nand_read_buf;
nc->ecc.mode = NAND_ECC_SOFT;
if (board->chip_delay)
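
orion_nand_read_buf() above is the usual unaligned-head / wide-body / byte-tail split; the inline "ldrd" is used because 32-bit ARM provides no readq() accessor. A portable sketch of the same structure, assuming a platform that does provide readq() (illustration only):

#include <linux/io.h>
#include <linux/types.h>

static void wide_read_buf(void __iomem *io_base, u8 *buf, int len)
{
	while (len > 0 && ((unsigned long)buf & 7)) {	/* unaligned head */
		*buf++ = readb(io_base);
		len--;
	}
	while (len >= 8) {				/* 64-bit body */
		*(u64 *)buf = readq(io_base);
		buf += 8;
		len -= 8;
	}
	while (len-- > 0)				/* byte tail */
		*buf++ = readb(io_base);
}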
diff --git a/drivers/mtd/nand/plat_nand.c b/drivers/mtd/nand/plat_nand.c
index 86e1d08eee0..4e16c6f5bdd 100644
--- a/drivers/mtd/nand/plat_nand.c
+++ b/drivers/mtd/nand/plat_nand.c
@@ -61,6 +61,8 @@ static int __devinit plat_nand_probe(struct platform_device *pdev)
data->chip.cmd_ctrl = pdata->ctrl.cmd_ctrl;
data->chip.dev_ready = pdata->ctrl.dev_ready;
data->chip.select_chip = pdata->ctrl.select_chip;
+ data->chip.write_buf = pdata->ctrl.write_buf;
+ data->chip.read_buf = pdata->ctrl.read_buf;
data->chip.chip_delay = pdata->chip.chip_delay;
data->chip.options |= pdata->chip.options;
@@ -70,6 +72,13 @@ static int __devinit plat_nand_probe(struct platform_device *pdev)
platform_set_drvdata(pdev, data);
+ /* Handle any platform specific setup */
+ if (pdata->ctrl.probe) {
+ res = pdata->ctrl.probe(pdev);
+ if (res)
+ goto out;
+ }
+
	/* Scan to find existence of the device */
if (nand_scan(&data->mtd, 1)) {
res = -ENXIO;
@@ -86,6 +95,8 @@ static int __devinit plat_nand_probe(struct platform_device *pdev)
return 0;
}
}
+ if (pdata->chip.set_parts)
+ pdata->chip.set_parts(data->mtd.size, &pdata->chip);
if (pdata->chip.partitions) {
data->parts = pdata->chip.partitions;
res = add_mtd_partitions(&data->mtd, data->parts,
@@ -99,6 +110,8 @@ static int __devinit plat_nand_probe(struct platform_device *pdev)
nand_release(&data->mtd);
out:
+ if (pdata->ctrl.remove)
+ pdata->ctrl.remove(pdev);
platform_set_drvdata(pdev, NULL);
iounmap(data->io_base);
kfree(data);
@@ -111,15 +124,15 @@ out:
static int __devexit plat_nand_remove(struct platform_device *pdev)
{
struct plat_nand_data *data = platform_get_drvdata(pdev);
-#ifdef CONFIG_MTD_PARTITIONS
struct platform_nand_data *pdata = pdev->dev.platform_data;
-#endif
nand_release(&data->mtd);
#ifdef CONFIG_MTD_PARTITIONS
if (data->parts && data->parts != pdata->chip.partitions)
kfree(data->parts);
#endif
+ if (pdata->ctrl.remove)
+ pdata->ctrl.remove(pdev);
iounmap(data->io_base);
kfree(data);
@@ -128,7 +141,7 @@ static int __devexit plat_nand_remove(struct platform_device *pdev)
static struct platform_driver plat_nand_driver = {
.probe = plat_nand_probe,
- .remove = plat_nand_remove,
+ .remove = __devexit_p(plat_nand_remove),
.driver = {
.name = "gen_nand",
.owner = THIS_MODULE,
diff --git a/drivers/mtd/nand/s3c2410.c b/drivers/mtd/nand/s3c2410.c
index 8e375d5fe23..11dc7e69c4f 100644
--- a/drivers/mtd/nand/s3c2410.c
+++ b/drivers/mtd/nand/s3c2410.c
@@ -74,6 +74,14 @@ static struct nand_ecclayout nand_hw_eccoob = {
struct s3c2410_nand_info;
+/**
+ * struct s3c2410_nand_mtd - driver MTD structure
+ * @mtd: The MTD instance to pass to the MTD layer.
+ * @chip: The NAND chip information.
+ * @set: The platform information supplied for this set of NAND chips.
+ * @info: Link back to the hardware information.
+ * @scan_res: The result from calling nand_scan_ident().
+*/
struct s3c2410_nand_mtd {
struct mtd_info mtd;
struct nand_chip chip;
@@ -90,6 +98,21 @@ enum s3c_cpu_type {
/* overview of the s3c2410 nand state */
+/**
+ * struct s3c2410_nand_info - NAND controller state.
+ * @mtds: An array of MTD instances on this controller.
+ * @platform: The platform data for this board.
+ * @device: The platform device we bound to.
+ * @area: The IO area resource that came from request_mem_region().
+ * @clk: The clock resource for this controller.
+ * @regs: The area mapped for the hardware registers described by @area.
+ * @sel_reg: Pointer to the register controlling the NAND selection.
+ * @sel_bit: The bit in @sel_reg to select the NAND chip.
+ * @mtd_count: The number of MTDs created from this controller.
+ * @save_sel: The contents of @sel_reg to be saved over suspend.
+ * @clk_rate: The clock rate from @clk.
+ * @cpu_type: The exact type of this controller.
+ */
struct s3c2410_nand_info {
/* mtd info */
struct nand_hw_control controller;
@@ -145,12 +168,19 @@ static inline int allow_clk_stop(struct s3c2410_nand_info *info)
#define NS_IN_KHZ 1000000
+/**
+ * s3c_nand_calc_rate - calculate timing data.
+ * @wanted: The cycle time in nanoseconds.
+ * @clk: The clock rate in kHz.
+ * @max: The maximum divider value.
+ *
+ * Calculate the timing value from the given parameters.
+ */
static int s3c_nand_calc_rate(int wanted, unsigned long clk, int max)
{
int result;
- result = (wanted * clk) / NS_IN_KHZ;
- result++;
+ result = DIV_ROUND_UP((wanted * clk), NS_IN_KHZ);
pr_debug("result %d from %ld, %d\n", result, clk, wanted);
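
The DIV_ROUND_UP() change only matters when the division is exact; the old code added one extra cycle even then. A worked example (numbers are illustrative, not from the patch):

#define NS_IN_KHZ		1000000
#define DIV_ROUND_UP(n, d)	(((n) + (d) - 1) / (d))

/* 20 ns wanted at a 100000 kHz (100 MHz) HCLK:
 *   old code:  (20 * 100000) / NS_IN_KHZ + 1        = 2 + 1 = 3 cycles
 *   new code:  DIV_ROUND_UP(20 * 100000, NS_IN_KHZ) = 2 cycles
 * For a non-exact case such as 25 ns at 133000 kHz (3.325 cycles) both
 * variants round up to 4, so only exact divisions are affected.
 */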
@@ -169,13 +199,21 @@ static int s3c_nand_calc_rate(int wanted, unsigned long clk, int max)
/* controller setup */
+/**
+ * s3c2410_nand_setrate - setup controller timing information.
+ * @info: The controller instance.
+ *
+ * Given the information supplied by the platform, calculate and set
+ * the necessary timing registers in the hardware to generate the
+ * necessary timing cycles to the hardware.
+ */
static int s3c2410_nand_setrate(struct s3c2410_nand_info *info)
{
struct s3c2410_platform_nand *plat = info->platform;
int tacls_max = (info->cpu_type == TYPE_S3C2412) ? 8 : 4;
int tacls, twrph0, twrph1;
unsigned long clkrate = clk_get_rate(info->clk);
- unsigned long set, cfg, mask;
+ unsigned long uninitialized_var(set), cfg, uninitialized_var(mask);
unsigned long flags;
/* calculate the timing information for the controller */
@@ -215,9 +253,9 @@ static int s3c2410_nand_setrate(struct s3c2410_nand_info *info)
case TYPE_S3C2440:
case TYPE_S3C2412:
- mask = (S3C2410_NFCONF_TACLS(tacls_max - 1) |
- S3C2410_NFCONF_TWRPH0(7) |
- S3C2410_NFCONF_TWRPH1(7));
+ mask = (S3C2440_NFCONF_TACLS(tacls_max - 1) |
+ S3C2440_NFCONF_TWRPH0(7) |
+ S3C2440_NFCONF_TWRPH1(7));
set = S3C2440_NFCONF_TACLS(tacls - 1);
set |= S3C2440_NFCONF_TWRPH0(twrph0 - 1);
@@ -225,14 +263,9 @@ static int s3c2410_nand_setrate(struct s3c2410_nand_info *info)
break;
default:
- /* keep compiler happy */
- mask = 0;
- set = 0;
BUG();
}
- dev_dbg(info->device, "NF_CONF is 0x%lx\n", cfg);
-
local_irq_save(flags);
cfg = readl(info->regs + S3C2410_NFCONF);
@@ -242,9 +275,18 @@ static int s3c2410_nand_setrate(struct s3c2410_nand_info *info)
local_irq_restore(flags);
+ dev_dbg(info->device, "NF_CONF is 0x%lx\n", cfg);
+
return 0;
}
+/**
+ * s3c2410_nand_inithw - basic hardware initialisation
+ * @info: The hardware state.
+ *
+ * Do the basic initialisation of the hardware, using s3c2410_nand_setrate()
+ * to setup the hardware access speeds and set the controller to be enabled.
+*/
static int s3c2410_nand_inithw(struct s3c2410_nand_info *info)
{
int ret;
@@ -268,8 +310,19 @@ static int s3c2410_nand_inithw(struct s3c2410_nand_info *info)
return 0;
}
-/* select chip */
-
+/**
+ * s3c2410_nand_select_chip - select the given nand chip
+ * @mtd: The MTD instance for this chip.
+ * @chip: The chip number.
+ *
+ * This is called by the MTD layer to either select a given chip for the
+ * @mtd instance, or to indicate that the access has finished and the
+ * chip can be de-selected.
+ *
+ * The routine ensures that the nFCE line is correctly setup, and any
+ * platform specific selection code is called to route nFCE to the specific
+ * chip.
+ */
static void s3c2410_nand_select_chip(struct mtd_info *mtd, int chip)
{
struct s3c2410_nand_info *info;
@@ -530,7 +583,16 @@ static void s3c2410_nand_read_buf(struct mtd_info *mtd, u_char *buf, int len)
static void s3c2440_nand_read_buf(struct mtd_info *mtd, u_char *buf, int len)
{
struct s3c2410_nand_info *info = s3c2410_nand_mtd_toinfo(mtd);
- readsl(info->regs + S3C2440_NFDATA, buf, len / 4);
+
+ readsl(info->regs + S3C2440_NFDATA, buf, len >> 2);
+
+ /* cleanup if we've got less than a word to do */
+ if (len & 3) {
+ buf += len & ~3;
+
+ for (; len & 3; len--)
+ *buf++ = readb(info->regs + S3C2440_NFDATA);
+ }
}
static void s3c2410_nand_write_buf(struct mtd_info *mtd, const u_char *buf, int len)
@@ -542,7 +604,16 @@ static void s3c2410_nand_write_buf(struct mtd_info *mtd, const u_char *buf, int
static void s3c2440_nand_write_buf(struct mtd_info *mtd, const u_char *buf, int len)
{
struct s3c2410_nand_info *info = s3c2410_nand_mtd_toinfo(mtd);
- writesl(info->regs + S3C2440_NFDATA, buf, len / 4);
+
+ writesl(info->regs + S3C2440_NFDATA, buf, len >> 2);
+
+ /* cleanup any fractional write */
+ if (len & 3) {
+ buf += len & ~3;
+
+ for (; len & 3; len--, buf++)
+ writeb(*buf, info->regs + S3C2440_NFDATA);
+ }
}
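A standalone sketch of the transfer pattern the S3C2440 read and write paths now share, with plain memory buffers standing in for readsl()/writesl() and readb()/writeb() on the NFDATA register: whole 32-bit words move in bulk, then any one to three trailing bytes are finished individually.

#include <stdio.h>
#include <stdint.h>
#include <string.h>

/* Copy len bytes: whole 32-bit words first, then the 0-3 leftover bytes. */
static void copy_words_then_bytes(uint8_t *dst, const uint8_t *src, int len)
{
    memcpy(dst, src, (size_t)(len >> 2) * 4);   /* stands in for readsl()/writesl() */

    if (len & 3) {                              /* trailing 1-3 bytes */
        dst += len & ~3;
        src += len & ~3;
        for (; len & 3; len--)
            *dst++ = *src++;                    /* stands in for readb()/writeb() */
    }
}

int main(void)
{
    uint8_t src[11] = "0123456789", dst[11] = { 0 };

    copy_words_then_bytes(dst, src, 11);        /* 2 whole words + 3 trailing bytes */
    printf("%s\n", (char *)dst);                /* prints 0123456789 */
    return 0;
}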
/* cpufreq driver support */
@@ -593,7 +664,7 @@ static inline void s3c2410_nand_cpufreq_deregister(struct s3c2410_nand_info *inf
/* device management functions */
-static int s3c2410_nand_remove(struct platform_device *pdev)
+static int s3c24xx_nand_remove(struct platform_device *pdev)
{
struct s3c2410_nand_info *info = to_nand_info(pdev);
@@ -645,17 +716,31 @@ static int s3c2410_nand_remove(struct platform_device *pdev)
}
#ifdef CONFIG_MTD_PARTITIONS
+const char *part_probes[] = { "cmdlinepart", NULL };
static int s3c2410_nand_add_partition(struct s3c2410_nand_info *info,
struct s3c2410_nand_mtd *mtd,
struct s3c2410_nand_set *set)
{
+ struct mtd_partition *part_info;
+ int nr_part = 0;
+
if (set == NULL)
return add_mtd_device(&mtd->mtd);
- if (set->nr_partitions > 0 && set->partitions != NULL) {
- return add_mtd_partitions(&mtd->mtd, set->partitions, set->nr_partitions);
+ if (set->nr_partitions == 0) {
+ mtd->mtd.name = set->name;
+ nr_part = parse_mtd_partitions(&mtd->mtd, part_probes,
+ &part_info, 0);
+ } else {
+ if (set->nr_partitions > 0 && set->partitions != NULL) {
+ nr_part = set->nr_partitions;
+ part_info = set->partitions;
+ }
}
+ if (nr_part > 0 && part_info)
+ return add_mtd_partitions(&mtd->mtd, part_info, nr_part);
+
return add_mtd_device(&mtd->mtd);
}
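When a set supplies no partition table, the code above now names the MTD after the set and hands partitioning to the "cmdlinepart" parser. A hypothetical kernel command line it would then consume (the MTD name, sizes and partition names are made up; the full syntax is documented in drivers/mtd/cmdlinepart.c):

mtdparts=nand:256k(boot),128k(env),-(rootfs)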
#else
@@ -667,11 +752,16 @@ static int s3c2410_nand_add_partition(struct s3c2410_nand_info *info,
}
#endif
-/* s3c2410_nand_init_chip
+/**
+ * s3c2410_nand_init_chip - initialise a single instance of a chip
+ * @info: The base NAND controller the chip is on.
+ * @nmtd: The new controller MTD instance to fill in.
+ * @set: The information passed from the board specific platform data.
*
- * init a single instance of an chip
-*/
-
+ * Initialise the given @nmtd from the information in @info and @set. This
+ * readies the structure for use with the MTD layer functions by ensuring
+ * all pointers are set up and the necessary control routines are selected.
+ */
static void s3c2410_nand_init_chip(struct s3c2410_nand_info *info,
struct s3c2410_nand_mtd *nmtd,
struct s3c2410_nand_set *set)
@@ -757,14 +847,40 @@ static void s3c2410_nand_init_chip(struct s3c2410_nand_info *info,
if (set->disable_ecc)
chip->ecc.mode = NAND_ECC_NONE;
+
+ switch (chip->ecc.mode) {
+ case NAND_ECC_NONE:
+ dev_info(info->device, "NAND ECC disabled\n");
+ break;
+ case NAND_ECC_SOFT:
+ dev_info(info->device, "NAND soft ECC\n");
+ break;
+ case NAND_ECC_HW:
+ dev_info(info->device, "NAND hardware ECC\n");
+ break;
+ default:
+ dev_info(info->device, "NAND ECC UNKNOWN\n");
+ break;
+ }
+
+ /* If you use u-boot BBT creation code, specifying this flag will
+ * let the kernel fish out the BBT from the NAND, and also skip the
+ * full NAND scan that can take 1/2s or so. Little things... */
+ if (set->flash_bbt)
+ chip->options |= NAND_USE_FLASH_BBT | NAND_SKIP_BBTSCAN;
}
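A hedged board-file sketch (not part of this patch, assuming the usual S3C platform NAND header that defines struct s3c2410_nand_set is in scope) showing how the set fields read above might be populated; only the members this function touches are shown, and the values are illustrative:

static struct s3c2410_nand_set example_nand_set = {
    .name          = "nand",
    .nr_partitions = 0,     /* fall back to cmdlinepart parsing */
    .partitions    = NULL,
    .flash_bbt     = 1,     /* reuse a u-boot written BBT, skip the full scan */
};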
-/* s3c2410_nand_update_chip
+/**
+ * s3c2410_nand_update_chip - post probe update
+ * @info: The controller instance.
+ * @nmtd: The driver version of the MTD instance.
*
- * post-probe chip update, to change any items, such as the
- * layout for large page nand
- */
-
+ * This routine is called after the chip probe has successfully completed
+ * and the relevant per-chip information updated. This call ensures that
+ * we update the internal state accordingly.
+ *
+ * The internal state is currently limited to the ECC state information.
+ */
static void s3c2410_nand_update_chip(struct s3c2410_nand_info *info,
struct s3c2410_nand_mtd *nmtd)
{
@@ -773,33 +889,33 @@ static void s3c2410_nand_update_chip(struct s3c2410_nand_info *info,
dev_dbg(info->device, "chip %p => page shift %d\n",
chip, chip->page_shift);
- if (hardware_ecc) {
+ if (chip->ecc.mode != NAND_ECC_HW)
+ return;
+
/* change the behaviour depending on whether we are using
* the large or small page nand device */
- if (chip->page_shift > 10) {
- chip->ecc.size = 256;
- chip->ecc.bytes = 3;
- } else {
- chip->ecc.size = 512;
- chip->ecc.bytes = 3;
- chip->ecc.layout = &nand_hw_eccoob;
- }
+ if (chip->page_shift > 10) {
+ chip->ecc.size = 256;
+ chip->ecc.bytes = 3;
+ } else {
+ chip->ecc.size = 512;
+ chip->ecc.bytes = 3;
+ chip->ecc.layout = &nand_hw_eccoob;
}
}
-/* s3c2410_nand_probe
+/* s3c24xx_nand_probe
*
* called by device layer when it finds a device matching
* one our driver can handle. This code checks to see if
* it can allocate all necessary resources then calls the
* nand layer to look for devices
*/
-
-static int s3c24xx_nand_probe(struct platform_device *pdev,
- enum s3c_cpu_type cpu_type)
+static int s3c24xx_nand_probe(struct platform_device *pdev)
{
struct s3c2410_platform_nand *plat = to_nand_plat(pdev);
+ enum s3c_cpu_type cpu_type;
struct s3c2410_nand_info *info;
struct s3c2410_nand_mtd *nmtd;
struct s3c2410_nand_set *sets;
@@ -809,6 +925,8 @@ static int s3c24xx_nand_probe(struct platform_device *pdev,
int nr_sets;
int setno;
+ cpu_type = platform_get_device_id(pdev)->driver_data;
+
pr_debug("s3c2410_nand_probe(%p)\n", pdev);
info = kmalloc(sizeof(*info), GFP_KERNEL);
@@ -922,7 +1040,7 @@ static int s3c24xx_nand_probe(struct platform_device *pdev,
return 0;
exit_error:
- s3c2410_nand_remove(pdev);
+ s3c24xx_nand_remove(pdev);
if (err == 0)
err = -EINVAL;
@@ -983,50 +1101,33 @@ static int s3c24xx_nand_resume(struct platform_device *dev)
/* driver device registration */
-static int s3c2410_nand_probe(struct platform_device *dev)
-{
- return s3c24xx_nand_probe(dev, TYPE_S3C2410);
-}
-
-static int s3c2440_nand_probe(struct platform_device *dev)
-{
- return s3c24xx_nand_probe(dev, TYPE_S3C2440);
-}
-
-static int s3c2412_nand_probe(struct platform_device *dev)
-{
- return s3c24xx_nand_probe(dev, TYPE_S3C2412);
-}
-
-static struct platform_driver s3c2410_nand_driver = {
- .probe = s3c2410_nand_probe,
- .remove = s3c2410_nand_remove,
- .suspend = s3c24xx_nand_suspend,
- .resume = s3c24xx_nand_resume,
- .driver = {
- .name = "s3c2410-nand",
- .owner = THIS_MODULE,
+static struct platform_device_id s3c24xx_driver_ids[] = {
+ {
+ .name = "s3c2410-nand",
+ .driver_data = TYPE_S3C2410,
+ }, {
+ .name = "s3c2440-nand",
+ .driver_data = TYPE_S3C2440,
+ }, {
+ .name = "s3c2412-nand",
+ .driver_data = TYPE_S3C2412,
+ }, {
+ .name = "s3c6400-nand",
+ .driver_data = TYPE_S3C2412, /* compatible with 2412 */
},
+ { }
};
-static struct platform_driver s3c2440_nand_driver = {
- .probe = s3c2440_nand_probe,
- .remove = s3c2410_nand_remove,
- .suspend = s3c24xx_nand_suspend,
- .resume = s3c24xx_nand_resume,
- .driver = {
- .name = "s3c2440-nand",
- .owner = THIS_MODULE,
- },
-};
+MODULE_DEVICE_TABLE(platform, s3c24xx_driver_ids);
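For comparison, a hedged sketch of the board side under the new scheme (assuming <linux/platform_device.h>): the device keeps its old name, the id table above matches it, and the shared probe recovers the CPU type from driver_data. Memory and IRQ resources are omitted for brevity.

static struct platform_device example_s3c2440_nand = {
    .name = "s3c2440-nand",   /* matched against s3c24xx_driver_ids[] */
    .id   = -1,
};

/* ... platform_device_register(&example_s3c2440_nand); */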
-static struct platform_driver s3c2412_nand_driver = {
- .probe = s3c2412_nand_probe,
- .remove = s3c2410_nand_remove,
+static struct platform_driver s3c24xx_nand_driver = {
+ .probe = s3c24xx_nand_probe,
+ .remove = s3c24xx_nand_remove,
.suspend = s3c24xx_nand_suspend,
.resume = s3c24xx_nand_resume,
+ .id_table = s3c24xx_driver_ids,
.driver = {
- .name = "s3c2412-nand",
+ .name = "s3c24xx-nand",
.owner = THIS_MODULE,
},
};
@@ -1035,16 +1136,12 @@ static int __init s3c2410_nand_init(void)
{
printk("S3C24XX NAND Driver, (c) 2004 Simtec Electronics\n");
- platform_driver_register(&s3c2412_nand_driver);
- platform_driver_register(&s3c2440_nand_driver);
- return platform_driver_register(&s3c2410_nand_driver);
+ return platform_driver_register(&s3c24xx_nand_driver);
}
static void __exit s3c2410_nand_exit(void)
{
- platform_driver_unregister(&s3c2412_nand_driver);
- platform_driver_unregister(&s3c2440_nand_driver);
- platform_driver_unregister(&s3c2410_nand_driver);
+ platform_driver_unregister(&s3c24xx_nand_driver);
}
module_init(s3c2410_nand_init);
@@ -1053,6 +1150,3 @@ module_exit(s3c2410_nand_exit);
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Ben Dooks <ben@simtec.co.uk>");
MODULE_DESCRIPTION("S3C24XX MTD NAND driver");
-MODULE_ALIAS("platform:s3c2410-nand");
-MODULE_ALIAS("platform:s3c2412-nand");
-MODULE_ALIAS("platform:s3c2440-nand");
diff --git a/drivers/mtd/nand/txx9ndfmc.c b/drivers/mtd/nand/txx9ndfmc.c
index 81247926489..488088eff2c 100644
--- a/drivers/mtd/nand/txx9ndfmc.c
+++ b/drivers/mtd/nand/txx9ndfmc.c
@@ -64,7 +64,7 @@ struct txx9ndfmc_priv {
struct nand_chip chip;
struct mtd_info mtd;
int cs;
- char mtdname[BUS_ID_SIZE + 2];
+ const char *mtdname;
};
#define MAX_TXX9NDFMC_DEV 4
@@ -334,16 +334,23 @@ static int __init txx9ndfmc_probe(struct platform_device *dev)
if (plat->ch_mask != 1) {
txx9_priv->cs = i;
- sprintf(txx9_priv->mtdname, "%s.%u",
- dev_name(&dev->dev), i);
+ txx9_priv->mtdname = kasprintf(GFP_KERNEL, "%s.%u",
+ dev_name(&dev->dev), i);
} else {
txx9_priv->cs = -1;
- strcpy(txx9_priv->mtdname, dev_name(&dev->dev));
+ txx9_priv->mtdname = kstrdup(dev_name(&dev->dev),
+ GFP_KERNEL);
+ }
+ if (!txx9_priv->mtdname) {
+ kfree(txx9_priv);
+ dev_err(&dev->dev, "Unable to allocate MTD name.\n");
+ continue;
}
if (plat->wide_mask & (1 << i))
chip->options |= NAND_BUSWIDTH_16;
if (nand_scan(mtd, 1)) {
+ kfree(txx9_priv->mtdname);
kfree(txx9_priv);
continue;
}
@@ -385,6 +392,7 @@ static int __exit txx9ndfmc_remove(struct platform_device *dev)
kfree(drvdata->parts[i]);
#endif
del_mtd_device(mtd);
+ kfree(txx9_priv->mtdname);
kfree(txx9_priv);
}
return 0;
diff --git a/drivers/mtd/nftlcore.c b/drivers/mtd/nftlcore.c
index e3f8495a94c..1002e188299 100644
--- a/drivers/mtd/nftlcore.c
+++ b/drivers/mtd/nftlcore.c
@@ -135,16 +135,17 @@ static void nftl_remove_dev(struct mtd_blktrans_dev *dev)
int nftl_read_oob(struct mtd_info *mtd, loff_t offs, size_t len,
size_t *retlen, uint8_t *buf)
{
+ loff_t mask = mtd->writesize - 1;
struct mtd_oob_ops ops;
int res;
ops.mode = MTD_OOB_PLACE;
- ops.ooboffs = offs & (mtd->writesize - 1);
+ ops.ooboffs = offs & mask;
ops.ooblen = len;
ops.oobbuf = buf;
ops.datbuf = NULL;
- res = mtd->read_oob(mtd, offs & ~(mtd->writesize - 1), &ops);
+ res = mtd->read_oob(mtd, offs & ~mask, &ops);
*retlen = ops.oobretlen;
return res;
}
@@ -155,16 +156,17 @@ int nftl_read_oob(struct mtd_info *mtd, loff_t offs, size_t len,
int nftl_write_oob(struct mtd_info *mtd, loff_t offs, size_t len,
size_t *retlen, uint8_t *buf)
{
+ loff_t mask = mtd->writesize - 1;
struct mtd_oob_ops ops;
int res;
ops.mode = MTD_OOB_PLACE;
- ops.ooboffs = offs & (mtd->writesize - 1);
+ ops.ooboffs = offs & mask;
ops.ooblen = len;
ops.oobbuf = buf;
ops.datbuf = NULL;
- res = mtd->write_oob(mtd, offs & ~(mtd->writesize - 1), &ops);
+ res = mtd->write_oob(mtd, offs & ~mask, &ops);
*retlen = ops.oobretlen;
return res;
}
@@ -177,17 +179,18 @@ int nftl_write_oob(struct mtd_info *mtd, loff_t offs, size_t len,
static int nftl_write(struct mtd_info *mtd, loff_t offs, size_t len,
size_t *retlen, uint8_t *buf, uint8_t *oob)
{
+ loff_t mask = mtd->writesize - 1;
struct mtd_oob_ops ops;
int res;
ops.mode = MTD_OOB_PLACE;
- ops.ooboffs = offs;
+ ops.ooboffs = offs & mask;
ops.ooblen = mtd->oobsize;
ops.oobbuf = oob;
ops.datbuf = buf;
ops.len = len;
- res = mtd->write_oob(mtd, offs & ~(mtd->writesize - 1), &ops);
+ res = mtd->write_oob(mtd, offs & ~mask, &ops);
*retlen = ops.retlen;
return res;
}
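Hoisting the mask into a loff_t keeps the factored expressions identical in all three helpers and, as a side effect, the complement no longer truncates offsets above 4GiB the way a 32-bit ~(writesize - 1) would; nftl_write() also now masks ooboffs like the OOB helpers. A standalone sketch of the arithmetic with illustrative values:

#include <stdio.h>
#include <stdint.h>

int main(void)
{
    uint32_t writesize = 512;               /* illustrative page size */
    int64_t  offs = 0x112345678LL;          /* illustrative offset above 4GiB */
    int64_t  mask = writesize - 1;          /* 64-bit mask, as in the patch */
    uint32_t mask32 = writesize - 1;        /* what the old expression complemented */

    printf("ooboffs   = 0x%llx\n", (unsigned long long)(offs & mask));    /* 0x78 */
    printf("new align = 0x%llx\n", (unsigned long long)(offs & ~mask));   /* 0x112345600 */
    printf("old align = 0x%llx\n", (unsigned long long)(offs & ~mask32)); /* 0x12345600 */
    return 0;
}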
@@ -208,7 +211,7 @@ static u16 NFTL_findfreeblock(struct NFTLrecord *nftl, int desperate )
/* Normally, we force a fold to happen before we run out of free blocks completely */
if (!desperate && nftl->numfreeEUNs < 2) {
DEBUG(MTD_DEBUG_LEVEL1, "NFTL_findfreeblock: there are too few free EUNs\n");
- return 0xffff;
+ return BLOCK_NIL;
}
/* Scan for a free block */
@@ -230,11 +233,11 @@ static u16 NFTL_findfreeblock(struct NFTLrecord *nftl, int desperate )
printk("Argh! No free blocks found! LastFreeEUN = %d, "
"FirstEUN = %d\n", nftl->LastFreeEUN,
le16_to_cpu(nftl->MediaHdr.FirstPhysicalEUN));
- return 0xffff;
+ return BLOCK_NIL;
}
} while (pot != nftl->LastFreeEUN);
- return 0xffff;
+ return BLOCK_NIL;
}
static u16 NFTL_foldchain (struct NFTLrecord *nftl, unsigned thisVUC, unsigned pendingblock )
@@ -431,7 +434,7 @@ static u16 NFTL_foldchain (struct NFTLrecord *nftl, unsigned thisVUC, unsigned p
/* add the header so that it is now a valid chain */
oob.u.a.VirtUnitNum = oob.u.a.SpareVirtUnitNum = cpu_to_le16(thisVUC);
- oob.u.a.ReplUnitNum = oob.u.a.SpareReplUnitNum = 0xffff;
+ oob.u.a.ReplUnitNum = oob.u.a.SpareReplUnitNum = BLOCK_NIL;
nftl_write_oob(mtd, (nftl->EraseSize * targetEUN) + 8,
8, &retlen, (char *)&oob.u);
@@ -515,7 +518,7 @@ static u16 NFTL_makefreeblock( struct NFTLrecord *nftl , unsigned pendingblock)
if (ChainLength < 2) {
printk(KERN_WARNING "No Virtual Unit Chains available for folding. "
"Failing request\n");
- return 0xffff;
+ return BLOCK_NIL;
}
return NFTL_foldchain (nftl, LongestChain, pendingblock);
@@ -578,7 +581,7 @@ static inline u16 NFTL_findwriteunit(struct NFTLrecord *nftl, unsigned block)
printk(KERN_WARNING
"Infinite loop in Virtual Unit Chain 0x%x\n",
thisVUC);
- return 0xffff;
+ return BLOCK_NIL;
}
/* Skip to next block in chain */
@@ -601,7 +604,7 @@ static inline u16 NFTL_findwriteunit(struct NFTLrecord *nftl, unsigned block)
//u16 startEUN = nftl->EUNtable[thisVUC];
//printk("Write to VirtualUnitChain %d, calling makefreeblock()\n", thisVUC);
- writeEUN = NFTL_makefreeblock(nftl, 0xffff);
+ writeEUN = NFTL_makefreeblock(nftl, BLOCK_NIL);
if (writeEUN == BLOCK_NIL) {
/* OK, we accept that the above comment is
@@ -673,7 +676,7 @@ static inline u16 NFTL_findwriteunit(struct NFTLrecord *nftl, unsigned block)
printk(KERN_WARNING "Error folding to make room for Virtual Unit Chain 0x%x\n",
thisVUC);
- return 0xffff;
+ return BLOCK_NIL;
}
static int nftl_writeblock(struct mtd_blktrans_dev *mbd, unsigned long block,
diff --git a/drivers/mtd/onenand/omap2.c b/drivers/mtd/onenand/omap2.c
index 6391e3dc800..0108ed42e87 100644
--- a/drivers/mtd/onenand/omap2.c
+++ b/drivers/mtd/onenand/omap2.c
@@ -266,7 +266,7 @@ static inline int omap2_onenand_bufferram_offset(struct mtd_info *mtd, int area)
if (ONENAND_CURRENT_BUFFERRAM(this)) {
if (area == ONENAND_DATARAM)
- return mtd->writesize;
+ return this->writesize;
if (area == ONENAND_SPARERAM)
return mtd->oobsize;
}
@@ -565,7 +565,7 @@ int omap2_onenand_rephase(void)
NULL, __adjust_timing);
}
-static void __devexit omap2_onenand_shutdown(struct platform_device *pdev)
+static void omap2_onenand_shutdown(struct platform_device *pdev)
{
struct omap2_onenand *c = dev_get_drvdata(&pdev->dev);
@@ -770,6 +770,7 @@ static int __devexit omap2_onenand_remove(struct platform_device *pdev)
}
iounmap(c->onenand.base);
release_mem_region(c->phys_base, ONENAND_IO_SIZE);
+ gpmc_cs_free(c->gpmc_cs);
kfree(c);
return 0;
@@ -777,7 +778,7 @@ static int __devexit omap2_onenand_remove(struct platform_device *pdev)
static struct platform_driver omap2_onenand_driver = {
.probe = omap2_onenand_probe,
- .remove = omap2_onenand_remove,
+ .remove = __devexit_p(omap2_onenand_remove),
.shutdown = omap2_onenand_shutdown,
.driver = {
.name = DRIVER_NAME,
diff --git a/drivers/mtd/onenand/onenand_base.c b/drivers/mtd/onenand/onenand_base.c
index 30d6999e5f9..6e829095ea9 100644
--- a/drivers/mtd/onenand/onenand_base.c
+++ b/drivers/mtd/onenand/onenand_base.c
@@ -9,6 +9,10 @@
* auto-placement support, read-while load support, various fixes
* Copyright (C) Nokia Corporation, 2007
*
+ * Vishak G <vishak.g at samsung.com>, Rohit Hagargundgi <h.rohit at samsung.com>
+ * Flex-OneNAND support
+ * Copyright (C) Samsung Electronics, 2008
+ *
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
@@ -16,6 +20,7 @@
#include <linux/kernel.h>
#include <linux/module.h>
+#include <linux/moduleparam.h>
#include <linux/init.h>
#include <linux/sched.h>
#include <linux/delay.h>
@@ -27,6 +32,38 @@
#include <asm/io.h>
+/* Default Flex-OneNAND boundary and lock respectively */
+static int flex_bdry[MAX_DIES * 2] = { -1, 0, -1, 0 };
+
+module_param_array(flex_bdry, int, NULL, 0400);
+MODULE_PARM_DESC(flex_bdry, "SLC Boundary information for Flex-OneNAND\n"
+ "Syntax: flex_bdry=DIE_BDRY,LOCK,...\n"
+ "DIE_BDRY: SLC boundary of the die\n"
+ "LOCK: Locking information for SLC boundary\n"
+ " : 0 -> Set boundary in unlocked status\n"
+ " : 1 -> Set boundary in locked status");
+
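An illustrative setting (the boundary values here are made up): loading the driver with

flex_bdry=127,0,255,1

asks for die 0's SLC boundary to be set at block 127 and left unlocked, and die 1's at block 255 and locked; the default of -1 leaves a die's boundary unchanged.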
+/**
+ * onenand_oob_128 - oob info for Flex-OneNAND with 4KB page
+ * For now, we expose only 64 out of 80 ecc bytes
+ */
+static struct nand_ecclayout onenand_oob_128 = {
+ .eccbytes = 64,
+ .eccpos = {
+ 6, 7, 8, 9, 10, 11, 12, 13, 14, 15,
+ 22, 23, 24, 25, 26, 27, 28, 29, 30, 31,
+ 38, 39, 40, 41, 42, 43, 44, 45, 46, 47,
+ 54, 55, 56, 57, 58, 59, 60, 61, 62, 63,
+ 70, 71, 72, 73, 74, 75, 76, 77, 78, 79,
+ 86, 87, 88, 89, 90, 91, 92, 93, 94, 95,
+ 102, 103, 104, 105
+ },
+ .oobfree = {
+ {2, 4}, {18, 4}, {34, 4}, {50, 4},
+ {66, 4}, {82, 4}, {98, 4}, {114, 4}
+ }
+};
+
/**
* onenand_oob_64 - oob info for large (2KB) page
*/
@@ -65,6 +102,14 @@ static const unsigned char ffchars[] = {
0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 48 */
0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 64 */
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 80 */
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 96 */
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 112 */
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 128 */
};
/**
@@ -171,6 +216,70 @@ static int onenand_buffer_address(int dataram1, int sectors, int count)
}
/**
+ * flexonenand_block - Return block number for a given address
+ * @param this - OneNAND device structure
+ * @param addr - Address for which block number is needed
+ */
+static unsigned flexonenand_block(struct onenand_chip *this, loff_t addr)
+{
+ unsigned boundary, blk, die = 0;
+
+ if (ONENAND_IS_DDP(this) && addr >= this->diesize[0]) {
+ die = 1;
+ addr -= this->diesize[0];
+ }
+
+ boundary = this->boundary[die];
+
+ blk = addr >> (this->erase_shift - 1);
+ if (blk > boundary)
+ blk = (blk + boundary + 1) >> 1;
+
+ blk += die ? this->density_mask : 0;
+ return blk;
+}
+
+inline unsigned onenand_block(struct onenand_chip *this, loff_t addr)
+{
+ if (!FLEXONENAND(this))
+ return addr >> this->erase_shift;
+ return flexonenand_block(this, addr);
+}
+
+/**
+ * flexonenand_addr - Return address of the block
+ * @this: OneNAND device structure
+ * @block: Block number on Flex-OneNAND
+ *
+ * Return address of the block
+ */
+static loff_t flexonenand_addr(struct onenand_chip *this, int block)
+{
+ loff_t ofs = 0;
+ int die = 0, boundary;
+
+ if (ONENAND_IS_DDP(this) && block >= this->density_mask) {
+ block -= this->density_mask;
+ die = 1;
+ ofs = this->diesize[0];
+ }
+
+ boundary = this->boundary[die];
+ ofs += (loff_t)block << (this->erase_shift - 1);
+ if (block > (boundary + 1))
+ ofs += (loff_t)(block - boundary - 1) << (this->erase_shift - 1);
+ return ofs;
+}
+
+loff_t onenand_addr(struct onenand_chip *this, int block)
+{
+ if (!FLEXONENAND(this))
+ return (loff_t)block << this->erase_shift;
+ return flexonenand_addr(this, block);
+}
+EXPORT_SYMBOL(onenand_addr);
+
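A standalone mirror of the single-die case of these two helpers, with assumed geometry (512KiB MLC blocks, i.e. erase_shift = 19, and an SLC boundary at block 3), showing the address-to-block mapping and its inverse agreeing:

#include <stdio.h>

#define ERASE_SHIFT 19  /* assumed: 512KiB MLC block, 256KiB SLC block */
#define BOUNDARY    3   /* assumed: blocks 0-3 are SLC */

static unsigned addr_to_block(long long addr)
{
    unsigned blk = addr >> (ERASE_SHIFT - 1);

    if (blk > BOUNDARY)
        blk = (blk + BOUNDARY + 1) >> 1;    /* past the boundary: MLC numbering */
    return blk;
}

static long long block_to_addr(unsigned blk)
{
    long long ofs = (long long)blk << (ERASE_SHIFT - 1);

    if (blk > BOUNDARY + 1)
        ofs += (long long)(blk - BOUNDARY - 1) << (ERASE_SHIFT - 1);
    return ofs;
}

int main(void)
{
    long long addr = 0x200000;  /* 2MiB into the die */
    unsigned blk = addr_to_block(addr);

    /* prints: 0x200000 -> block 6 -> 0x200000 */
    printf("0x%llx -> block %u -> 0x%llx\n", (unsigned long long)addr,
           blk, (unsigned long long)block_to_addr(blk));
    return 0;
}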
+/**
* onenand_get_density - [DEFAULT] Get OneNAND density
* @param dev_id OneNAND device ID
*
@@ -183,6 +292,22 @@ static inline int onenand_get_density(int dev_id)
}
/**
+ * flexonenand_region - [Flex-OneNAND] Return erase region of addr
+ * @param mtd MTD device structure
+ * @param addr address whose erase region needs to be identified
+ */
+int flexonenand_region(struct mtd_info *mtd, loff_t addr)
+{
+ int i;
+
+ for (i = 0; i < mtd->numeraseregions; i++)
+ if (addr < mtd->eraseregions[i].offset)
+ break;
+ return i - 1;
+}
+EXPORT_SYMBOL(flexonenand_region);
+
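A minimal standalone mirror of this lookup over an assumed two-region table, with the MLC region starting at 32MiB:

#include <stdio.h>

/* assumed table: SLC region at 0, MLC region starting at 32MiB */
static long long region_offset[] = { 0x0, 0x2000000 };

static int region_of(long long addr)
{
    int i;

    for (i = 0; i < 2; i++)
        if (addr < region_offset[i])
            break;
    return i - 1;
}

int main(void)
{
    /* prints: 0 1 */
    printf("%d %d\n", region_of(0x100000), region_of(0x3000000));
    return 0;
}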
+/**
* onenand_command - [DEFAULT] Send command to OneNAND device
* @param mtd MTD device structure
* @param cmd the command to be sent
@@ -207,16 +332,28 @@ static int onenand_command(struct mtd_info *mtd, int cmd, loff_t addr, size_t le
page = -1;
break;
+ case FLEXONENAND_CMD_PI_ACCESS:
+ /* addr contains die index */
+ block = addr * this->density_mask;
+ page = -1;
+ break;
+
case ONENAND_CMD_ERASE:
case ONENAND_CMD_BUFFERRAM:
case ONENAND_CMD_OTP_ACCESS:
- block = (int) (addr >> this->erase_shift);
+ block = onenand_block(this, addr);
page = -1;
break;
+ case FLEXONENAND_CMD_READ_PI:
+ cmd = ONENAND_CMD_READ;
+ block = addr * this->density_mask;
+ page = 0;
+ break;
+
default:
- block = (int) (addr >> this->erase_shift);
- page = (int) (addr >> this->page_shift);
+ block = onenand_block(this, addr);
+ page = (int) (addr - onenand_addr(this, block)) >> this->page_shift;
if (ONENAND_IS_2PLANE(this)) {
/* Make the even block number */
@@ -236,7 +373,7 @@ static int onenand_command(struct mtd_info *mtd, int cmd, loff_t addr, size_t le
value = onenand_bufferram_address(this, block);
this->write_word(value, this->base + ONENAND_REG_START_ADDRESS2);
- if (ONENAND_IS_2PLANE(this))
+ if (ONENAND_IS_MLC(this) || ONENAND_IS_2PLANE(this))
/* It is always BufferRAM0 */
ONENAND_SET_BUFFERRAM0(this);
else
@@ -258,13 +395,18 @@ static int onenand_command(struct mtd_info *mtd, int cmd, loff_t addr, size_t le
if (page != -1) {
/* Now we use page size operation */
- int sectors = 4, count = 4;
+ int sectors = 0, count = 0;
int dataram;
switch (cmd) {
+ case FLEXONENAND_CMD_RECOVER_LSB:
case ONENAND_CMD_READ:
case ONENAND_CMD_READOOB:
- dataram = ONENAND_SET_NEXT_BUFFERRAM(this);
+ if (ONENAND_IS_MLC(this))
+ /* It is always BufferRAM0 */
+ dataram = ONENAND_SET_BUFFERRAM0(this);
+ else
+ dataram = ONENAND_SET_NEXT_BUFFERRAM(this);
break;
default:
@@ -293,6 +435,30 @@ static int onenand_command(struct mtd_info *mtd, int cmd, loff_t addr, size_t le
}
/**
+ * onenand_read_ecc - return ecc status
+ * @param this onenand chip structure
+ */
+static inline int onenand_read_ecc(struct onenand_chip *this)
+{
+ int ecc, i, result = 0;
+
+ if (!FLEXONENAND(this))
+ return this->read_word(this->base + ONENAND_REG_ECC_STATUS);
+
+ for (i = 0; i < 4; i++) {
+ ecc = this->read_word(this->base + ONENAND_REG_ECC_STATUS + i);
+ if (likely(!ecc))
+ continue;
+ if (ecc & FLEXONENAND_UNCORRECTABLE_ERROR)
+ return ONENAND_ECC_2BIT_ALL;
+ else
+ result = ONENAND_ECC_1BIT_ALL;
+ }
+
+ return result;
+}
+
+/**
* onenand_wait - [DEFAULT] wait until the command is done
* @param mtd MTD device structure
* @param state state to select the max. timeout value
@@ -331,14 +497,14 @@ static int onenand_wait(struct mtd_info *mtd, int state)
* power off recovery (POR) test, it should read ECC status first
*/
if (interrupt & ONENAND_INT_READ) {
- int ecc = this->read_word(this->base + ONENAND_REG_ECC_STATUS);
+ int ecc = onenand_read_ecc(this);
if (ecc) {
if (ecc & ONENAND_ECC_2BIT_ALL) {
printk(KERN_ERR "onenand_wait: ECC error = 0x%04x\n", ecc);
mtd->ecc_stats.failed++;
return -EBADMSG;
} else if (ecc & ONENAND_ECC_1BIT_ALL) {
- printk(KERN_INFO "onenand_wait: correctable ECC error = 0x%04x\n", ecc);
+ printk(KERN_DEBUG "onenand_wait: correctable ECC error = 0x%04x\n", ecc);
mtd->ecc_stats.corrected++;
}
}
@@ -656,7 +822,7 @@ static int onenand_check_bufferram(struct mtd_info *mtd, loff_t addr)
if (found && ONENAND_IS_DDP(this)) {
/* Select DataRAM for DDP */
- int block = (int) (addr >> this->erase_shift);
+ int block = onenand_block(this, addr);
int value = onenand_bufferram_address(this, block);
this->write_word(value, this->base + ONENAND_REG_START_ADDRESS2);
}
@@ -816,6 +982,149 @@ static int onenand_transfer_auto_oob(struct mtd_info *mtd, uint8_t *buf, int col
}
/**
+ * onenand_recover_lsb - [Flex-OneNAND] Recover LSB page data
+ * @param mtd MTD device structure
+ * @param addr address to recover
+ * @param status return value from onenand_wait / onenand_bbt_wait
+ *
+ * MLC NAND Flash cell has paired pages - LSB page and MSB page. LSB page has
+ * lower page address and MSB page has higher page address in paired pages.
+ * If power off occurs during MSB page program, the paired LSB page data can
+ * become corrupt. An LSB page recovery read is a way to read the LSB page even
+ * though its data are corrupted. When an uncorrectable error occurs as a result
+ * of an LSB page read after power up, issue an LSB page recovery read.
+ */
+static int onenand_recover_lsb(struct mtd_info *mtd, loff_t addr, int status)
+{
+ struct onenand_chip *this = mtd->priv;
+ int i;
+
+ /* Recovery is only for Flex-OneNAND */
+ if (!FLEXONENAND(this))
+ return status;
+
+ /* check if we failed due to uncorrectable error */
+ if (status != -EBADMSG && status != ONENAND_BBT_READ_ECC_ERROR)
+ return status;
+
+ /* check if address lies in MLC region */
+ i = flexonenand_region(mtd, addr);
+ if (mtd->eraseregions[i].erasesize < (1 << this->erase_shift))
+ return status;
+
+ /* We are attempting to reread, so decrement stats.failed
+ * which was incremented by onenand_wait due to read failure
+ */
+ printk(KERN_INFO "onenand_recover_lsb: Attempting to recover from uncorrectable read\n");
+ mtd->ecc_stats.failed--;
+
+ /* Issue the LSB page recovery command */
+ this->command(mtd, FLEXONENAND_CMD_RECOVER_LSB, addr, this->writesize);
+ return this->wait(mtd, FL_READING);
+}
+
+/**
+ * onenand_mlc_read_ops_nolock - MLC OneNAND read main and/or out-of-band
+ * @param mtd MTD device structure
+ * @param from offset to read from
+ * @param ops: oob operation description structure
+ *
+ * MLC OneNAND / Flex-OneNAND has 4KB page size and 4KB dataram.
+ * So, read-while-load is not present.
+ */
+static int onenand_mlc_read_ops_nolock(struct mtd_info *mtd, loff_t from,
+ struct mtd_oob_ops *ops)
+{
+ struct onenand_chip *this = mtd->priv;
+ struct mtd_ecc_stats stats;
+ size_t len = ops->len;
+ size_t ooblen = ops->ooblen;
+ u_char *buf = ops->datbuf;
+ u_char *oobbuf = ops->oobbuf;
+ int read = 0, column, thislen;
+ int oobread = 0, oobcolumn, thisooblen, oobsize;
+ int ret = 0;
+ int writesize = this->writesize;
+
+ DEBUG(MTD_DEBUG_LEVEL3, "onenand_mlc_read_ops_nolock: from = 0x%08x, len = %i\n", (unsigned int) from, (int) len);
+
+ if (ops->mode == MTD_OOB_AUTO)
+ oobsize = this->ecclayout->oobavail;
+ else
+ oobsize = mtd->oobsize;
+
+ oobcolumn = from & (mtd->oobsize - 1);
+
+ /* Do not allow reads past end of device */
+ if (from + len > mtd->size) {
+ printk(KERN_ERR "onenand_mlc_read_ops_nolock: Attempt read beyond end of device\n");
+ ops->retlen = 0;
+ ops->oobretlen = 0;
+ return -EINVAL;
+ }
+
+ stats = mtd->ecc_stats;
+
+ while (read < len) {
+ cond_resched();
+
+ thislen = min_t(int, writesize, len - read);
+
+ column = from & (writesize - 1);
+ if (column + thislen > writesize)
+ thislen = writesize - column;
+
+ if (!onenand_check_bufferram(mtd, from)) {
+ this->command(mtd, ONENAND_CMD_READ, from, writesize);
+
+ ret = this->wait(mtd, FL_READING);
+ if (unlikely(ret))
+ ret = onenand_recover_lsb(mtd, from, ret);
+ onenand_update_bufferram(mtd, from, !ret);
+ if (ret == -EBADMSG)
+ ret = 0;
+ }
+
+ this->read_bufferram(mtd, ONENAND_DATARAM, buf, column, thislen);
+ if (oobbuf) {
+ thisooblen = oobsize - oobcolumn;
+ thisooblen = min_t(int, thisooblen, ooblen - oobread);
+
+ if (ops->mode == MTD_OOB_AUTO)
+ onenand_transfer_auto_oob(mtd, oobbuf, oobcolumn, thisooblen);
+ else
+ this->read_bufferram(mtd, ONENAND_SPARERAM, oobbuf, oobcolumn, thisooblen);
+ oobread += thisooblen;
+ oobbuf += thisooblen;
+ oobcolumn = 0;
+ }
+
+ read += thislen;
+ if (read == len)
+ break;
+
+ from += thislen;
+ buf += thislen;
+ }
+
+ /*
+ * Return success, if no ECC failures, else -EBADMSG
+ * fs driver will take care of that, because
+ * retlen == desired len and result == -EBADMSG
+ */
+ ops->retlen = read;
+ ops->oobretlen = oobread;
+
+ if (ret)
+ return ret;
+
+ if (mtd->ecc_stats.failed - stats.failed)
+ return -EBADMSG;
+
+ return mtd->ecc_stats.corrected - stats.corrected ? -EUCLEAN : 0;
+}
+
+/**
* onenand_read_ops_nolock - [OneNAND Interface] OneNAND read main and/or out-of-band
* @param mtd MTD device structure
* @param from offset to read from
@@ -962,7 +1271,7 @@ static int onenand_read_oob_nolock(struct mtd_info *mtd, loff_t from,
size_t len = ops->ooblen;
mtd_oob_mode_t mode = ops->mode;
u_char *buf = ops->oobbuf;
- int ret = 0;
+ int ret = 0, readcmd;
from += ops->ooboffs;
@@ -993,17 +1302,22 @@ static int onenand_read_oob_nolock(struct mtd_info *mtd, loff_t from,
stats = mtd->ecc_stats;
+ readcmd = ONENAND_IS_MLC(this) ? ONENAND_CMD_READ : ONENAND_CMD_READOOB;
+
while (read < len) {
cond_resched();
thislen = oobsize - column;
thislen = min_t(int, thislen, len);
- this->command(mtd, ONENAND_CMD_READOOB, from, mtd->oobsize);
+ this->command(mtd, readcmd, from, mtd->oobsize);
onenand_update_bufferram(mtd, from, 0);
ret = this->wait(mtd, FL_READING);
+ if (unlikely(ret))
+ ret = onenand_recover_lsb(mtd, from, ret);
+
if (ret && ret != -EBADMSG) {
printk(KERN_ERR "onenand_read_oob_nolock: read failed = 0x%x\n", ret);
break;
@@ -1053,6 +1367,7 @@ static int onenand_read_oob_nolock(struct mtd_info *mtd, loff_t from,
static int onenand_read(struct mtd_info *mtd, loff_t from, size_t len,
size_t *retlen, u_char *buf)
{
+ struct onenand_chip *this = mtd->priv;
struct mtd_oob_ops ops = {
.len = len,
.ooblen = 0,
@@ -1062,7 +1377,9 @@ static int onenand_read(struct mtd_info *mtd, loff_t from, size_t len,
int ret;
onenand_get_device(mtd, FL_READING);
- ret = onenand_read_ops_nolock(mtd, from, &ops);
+ ret = ONENAND_IS_MLC(this) ?
+ onenand_mlc_read_ops_nolock(mtd, from, &ops) :
+ onenand_read_ops_nolock(mtd, from, &ops);
onenand_release_device(mtd);
*retlen = ops.retlen;
@@ -1080,6 +1397,7 @@ static int onenand_read(struct mtd_info *mtd, loff_t from, size_t len,
static int onenand_read_oob(struct mtd_info *mtd, loff_t from,
struct mtd_oob_ops *ops)
{
+ struct onenand_chip *this = mtd->priv;
int ret;
switch (ops->mode) {
@@ -1094,7 +1412,9 @@ static int onenand_read_oob(struct mtd_info *mtd, loff_t from,
onenand_get_device(mtd, FL_READING);
if (ops->datbuf)
- ret = onenand_read_ops_nolock(mtd, from, ops);
+ ret = ONENAND_IS_MLC(this) ?
+ onenand_mlc_read_ops_nolock(mtd, from, ops) :
+ onenand_read_ops_nolock(mtd, from, ops);
else
ret = onenand_read_oob_nolock(mtd, from, ops);
onenand_release_device(mtd);
@@ -1128,11 +1448,11 @@ static int onenand_bbt_wait(struct mtd_info *mtd, int state)
ctrl = this->read_word(this->base + ONENAND_REG_CTRL_STATUS);
if (interrupt & ONENAND_INT_READ) {
- int ecc = this->read_word(this->base + ONENAND_REG_ECC_STATUS);
+ int ecc = onenand_read_ecc(this);
if (ecc & ONENAND_ECC_2BIT_ALL) {
printk(KERN_INFO "onenand_bbt_wait: ecc error = 0x%04x"
", controller error 0x%04x\n", ecc, ctrl);
- return ONENAND_BBT_READ_ERROR;
+ return ONENAND_BBT_READ_ECC_ERROR;
}
} else {
printk(KERN_ERR "onenand_bbt_wait: read timeout!"
@@ -1163,7 +1483,7 @@ int onenand_bbt_read_oob(struct mtd_info *mtd, loff_t from,
{
struct onenand_chip *this = mtd->priv;
int read = 0, thislen, column;
- int ret = 0;
+ int ret = 0, readcmd;
size_t len = ops->ooblen;
u_char *buf = ops->oobbuf;
@@ -1183,17 +1503,22 @@ int onenand_bbt_read_oob(struct mtd_info *mtd, loff_t from,
column = from & (mtd->oobsize - 1);
+ readcmd = ONENAND_IS_MLC(this) ? ONENAND_CMD_READ : ONENAND_CMD_READOOB;
+
while (read < len) {
cond_resched();
thislen = mtd->oobsize - column;
thislen = min_t(int, thislen, len);
- this->command(mtd, ONENAND_CMD_READOOB, from, mtd->oobsize);
+ this->command(mtd, readcmd, from, mtd->oobsize);
onenand_update_bufferram(mtd, from, 0);
- ret = onenand_bbt_wait(mtd, FL_READING);
+ ret = this->bbt_wait(mtd, FL_READING);
+ if (unlikely(ret))
+ ret = onenand_recover_lsb(mtd, from, ret);
+
if (ret)
break;
@@ -1230,9 +1555,11 @@ static int onenand_verify_oob(struct mtd_info *mtd, const u_char *buf, loff_t to
{
struct onenand_chip *this = mtd->priv;
u_char *oob_buf = this->oob_buf;
- int status, i;
+ int status, i, readcmd;
- this->command(mtd, ONENAND_CMD_READOOB, to, mtd->oobsize);
+ readcmd = ONENAND_IS_MLC(this) ? ONENAND_CMD_READ : ONENAND_CMD_READOOB;
+
+ this->command(mtd, readcmd, to, mtd->oobsize);
onenand_update_bufferram(mtd, to, 0);
status = this->wait(mtd, FL_READING);
if (status)
@@ -1633,7 +1960,7 @@ static int onenand_write_oob_nolock(struct mtd_info *mtd, loff_t to,
{
struct onenand_chip *this = mtd->priv;
int column, ret = 0, oobsize;
- int written = 0;
+ int written = 0, oobcmd;
u_char *oobbuf;
size_t len = ops->ooblen;
const u_char *buf = ops->oobbuf;
@@ -1675,6 +2002,8 @@ static int onenand_write_oob_nolock(struct mtd_info *mtd, loff_t to,
oobbuf = this->oob_buf;
+ oobcmd = ONENAND_IS_MLC(this) ? ONENAND_CMD_PROG : ONENAND_CMD_PROGOOB;
+
/* Loop until all data write */
while (written < len) {
int thislen = min_t(int, oobsize, len - written);
@@ -1692,7 +2021,14 @@ static int onenand_write_oob_nolock(struct mtd_info *mtd, loff_t to,
memcpy(oobbuf + column, buf, thislen);
this->write_bufferram(mtd, ONENAND_SPARERAM, oobbuf, 0, mtd->oobsize);
- this->command(mtd, ONENAND_CMD_PROGOOB, to, mtd->oobsize);
+ if (ONENAND_IS_MLC(this)) {
+ /* Set main area of DataRAM to 0xff*/
+ memset(this->page_buf, 0xff, mtd->writesize);
+ this->write_bufferram(mtd, ONENAND_DATARAM,
+ this->page_buf, 0, mtd->writesize);
+ }
+
+ this->command(mtd, oobcmd, to, mtd->oobsize);
onenand_update_bufferram(mtd, to, 0);
if (ONENAND_IS_2PLANE(this)) {
@@ -1815,29 +2151,48 @@ static int onenand_erase(struct mtd_info *mtd, struct erase_info *instr)
{
struct onenand_chip *this = mtd->priv;
unsigned int block_size;
- loff_t addr;
- int len;
- int ret = 0;
+ loff_t addr = instr->addr;
+ loff_t len = instr->len;
+ int ret = 0, i;
+ struct mtd_erase_region_info *region = NULL;
+ loff_t region_end = 0;
DEBUG(MTD_DEBUG_LEVEL3, "onenand_erase: start = 0x%012llx, len = %llu\n", (unsigned long long) instr->addr, (unsigned long long) instr->len);
- block_size = (1 << this->erase_shift);
-
- /* Start address must align on block boundary */
- if (unlikely(instr->addr & (block_size - 1))) {
- printk(KERN_ERR "onenand_erase: Unaligned address\n");
+ /* Do not allow erase past end of device */
+ if (unlikely((len + addr) > mtd->size)) {
+ printk(KERN_ERR "onenand_erase: Erase past end of device\n");
return -EINVAL;
}
- /* Length must align on block boundary */
- if (unlikely(instr->len & (block_size - 1))) {
- printk(KERN_ERR "onenand_erase: Length not block aligned\n");
- return -EINVAL;
+ if (FLEXONENAND(this)) {
+ /* Find the eraseregion of this address */
+ i = flexonenand_region(mtd, addr);
+ region = &mtd->eraseregions[i];
+
+ block_size = region->erasesize;
+ region_end = region->offset + region->erasesize * region->numblocks;
+
+ /* Start address within region must align on block boundary.
+ * Erase region's start offset is always block start address.
+ */
+ if (unlikely((addr - region->offset) & (block_size - 1))) {
+ printk(KERN_ERR "onenand_erase: Unaligned address\n");
+ return -EINVAL;
+ }
+ } else {
+ block_size = 1 << this->erase_shift;
+
+ /* Start address must align on block boundary */
+ if (unlikely(addr & (block_size - 1))) {
+ printk(KERN_ERR "onenand_erase: Unaligned address\n");
+ return -EINVAL;
+ }
}
- /* Do not allow erase past end of device */
- if (unlikely((instr->len + instr->addr) > mtd->size)) {
- printk(KERN_ERR "onenand_erase: Erase past end of device\n");
+ /* Length must align on block boundary */
+ if (unlikely(len & (block_size - 1))) {
+ printk(KERN_ERR "onenand_erase: Length not block aligned\n");
return -EINVAL;
}
@@ -1847,9 +2202,6 @@ static int onenand_erase(struct mtd_info *mtd, struct erase_info *instr)
onenand_get_device(mtd, FL_ERASING);
/* Loop throught the pages */
- len = instr->len;
- addr = instr->addr;
-
instr->state = MTD_ERASING;
while (len) {
@@ -1869,7 +2221,8 @@ static int onenand_erase(struct mtd_info *mtd, struct erase_info *instr)
ret = this->wait(mtd, FL_ERASING);
/* Check, if it is write protected */
if (ret) {
- printk(KERN_ERR "onenand_erase: Failed erase, block %d\n", (unsigned) (addr >> this->erase_shift));
+ printk(KERN_ERR "onenand_erase: Failed erase, block %d\n",
+ onenand_block(this, addr));
instr->state = MTD_ERASE_FAILED;
instr->fail_addr = addr;
goto erase_exit;
@@ -1877,6 +2230,22 @@ static int onenand_erase(struct mtd_info *mtd, struct erase_info *instr)
len -= block_size;
addr += block_size;
+
+ if (addr == region_end) {
+ if (!len)
+ break;
+ region++;
+
+ block_size = region->erasesize;
+ region_end = region->offset + region->erasesize * region->numblocks;
+
+ if (len & (block_size - 1)) {
+ /* FIXME: This should be handled at MTD partitioning level. */
+ printk(KERN_ERR "onenand_erase: Unaligned address\n");
+ goto erase_exit;
+ }
+ }
+
}
instr->state = MTD_ERASE_DONE;
@@ -1955,13 +2324,17 @@ static int onenand_default_block_markbad(struct mtd_info *mtd, loff_t ofs)
int block;
/* Get block number */
- block = ((int) ofs) >> bbm->bbt_erase_shift;
+ block = onenand_block(this, ofs);
if (bbm->bbt)
bbm->bbt[block >> 2] |= 0x01 << ((block & 0x03) << 1);
/* We write two bytes, so we dont have to mess with 16 bit access */
ofs += mtd->oobsize + (bbm->badblockpos & ~0x01);
- return onenand_write_oob_nolock(mtd, ofs, &ops);
+ /* FIXME : What to do when marking SLC block in partition
+ * with MLC erasesize? For now, it is not advisable to
+ * create partitions containing both SLC and MLC regions.
+ */
+ return onenand_write_oob_nolock(mtd, ofs, &ops);
}
/**
@@ -2005,8 +2378,8 @@ static int onenand_do_lock_cmd(struct mtd_info *mtd, loff_t ofs, size_t len, int
int start, end, block, value, status;
int wp_status_mask;
- start = ofs >> this->erase_shift;
- end = len >> this->erase_shift;
+ start = onenand_block(this, ofs);
+ end = onenand_block(this, ofs + len) - 1;
if (cmd == ONENAND_CMD_LOCK)
wp_status_mask = ONENAND_WP_LS;
@@ -2018,7 +2391,7 @@ static int onenand_do_lock_cmd(struct mtd_info *mtd, loff_t ofs, size_t len, int
/* Set start block address */
this->write_word(start, this->base + ONENAND_REG_START_BLOCK_ADDRESS);
/* Set end block address */
- this->write_word(start + end - 1, this->base + ONENAND_REG_END_BLOCK_ADDRESS);
+ this->write_word(end, this->base + ONENAND_REG_END_BLOCK_ADDRESS);
/* Write lock command */
this->command(mtd, cmd, 0, 0);
@@ -2039,7 +2412,7 @@ static int onenand_do_lock_cmd(struct mtd_info *mtd, loff_t ofs, size_t len, int
}
/* Block lock scheme */
- for (block = start; block < start + end; block++) {
+ for (block = start; block < end + 1; block++) {
/* Set block address */
value = onenand_block_address(this, block);
this->write_word(value, this->base + ONENAND_REG_START_ADDRESS1);
@@ -2147,7 +2520,7 @@ static void onenand_unlock_all(struct mtd_info *mtd)
{
struct onenand_chip *this = mtd->priv;
loff_t ofs = 0;
- size_t len = this->chipsize;
+ loff_t len = mtd->size;
if (this->options & ONENAND_HAS_UNLOCK_ALL) {
/* Set start block address */
@@ -2163,12 +2536,16 @@ static void onenand_unlock_all(struct mtd_info *mtd)
& ONENAND_CTRL_ONGO)
continue;
+ /* Don't check lock status */
+ if (this->options & ONENAND_SKIP_UNLOCK_CHECK)
+ return;
+
/* Check lock status */
if (onenand_check_lock_status(this))
return;
/* Workaround for all block unlock in DDP */
- if (ONENAND_IS_DDP(this)) {
+ if (ONENAND_IS_DDP(this) && !FLEXONENAND(this)) {
/* All blocks on another chip */
ofs = this->chipsize >> 1;
len = this->chipsize >> 1;
@@ -2210,7 +2587,9 @@ static int do_otp_read(struct mtd_info *mtd, loff_t from, size_t len,
this->command(mtd, ONENAND_CMD_OTP_ACCESS, 0, 0);
this->wait(mtd, FL_OTPING);
- ret = onenand_read_ops_nolock(mtd, from, &ops);
+ ret = ONENAND_IS_MLC(this) ?
+ onenand_mlc_read_ops_nolock(mtd, from, &ops) :
+ onenand_read_ops_nolock(mtd, from, &ops);
/* Exit OTP access mode */
this->command(mtd, ONENAND_CMD_RESET, 0, 0);
@@ -2277,21 +2656,32 @@ static int do_otp_lock(struct mtd_info *mtd, loff_t from, size_t len,
size_t *retlen, u_char *buf)
{
struct onenand_chip *this = mtd->priv;
- struct mtd_oob_ops ops = {
- .mode = MTD_OOB_PLACE,
- .ooblen = len,
- .oobbuf = buf,
- .ooboffs = 0,
- };
+ struct mtd_oob_ops ops;
int ret;
/* Enter OTP access mode */
this->command(mtd, ONENAND_CMD_OTP_ACCESS, 0, 0);
this->wait(mtd, FL_OTPING);
- ret = onenand_write_oob_nolock(mtd, from, &ops);
-
- *retlen = ops.oobretlen;
+ if (FLEXONENAND(this)) {
+ /*
+ * For Flex-OneNAND, we write lock mark to 1st word of sector 4 of
+ * main area of page 49.
+ */
+ ops.len = mtd->writesize;
+ ops.ooblen = 0;
+ ops.datbuf = buf;
+ ops.oobbuf = NULL;
+ ret = onenand_write_ops_nolock(mtd, mtd->writesize * 49, &ops);
+ *retlen = ops.retlen;
+ } else {
+ ops.mode = MTD_OOB_PLACE;
+ ops.ooblen = len;
+ ops.oobbuf = buf;
+ ops.ooboffs = 0;
+ ret = onenand_write_oob_nolock(mtd, from, &ops);
+ *retlen = ops.oobretlen;
+ }
/* Exit OTP access mode */
this->command(mtd, ONENAND_CMD_RESET, 0, 0);
@@ -2475,27 +2865,34 @@ static int onenand_lock_user_prot_reg(struct mtd_info *mtd, loff_t from,
size_t len)
{
struct onenand_chip *this = mtd->priv;
- u_char *oob_buf = this->oob_buf;
+ u_char *buf = FLEXONENAND(this) ? this->page_buf : this->oob_buf;
size_t retlen;
int ret;
- memset(oob_buf, 0xff, mtd->oobsize);
+ memset(buf, 0xff, FLEXONENAND(this) ? this->writesize
+ : mtd->oobsize);
/*
* Note: OTP lock operation
* OTP block : 0xXXFC
* 1st block : 0xXXF3 (If chip support)
* Both : 0xXXF0 (If chip support)
*/
- oob_buf[ONENAND_OTP_LOCK_OFFSET] = 0xFC;
+ if (FLEXONENAND(this))
+ buf[FLEXONENAND_OTP_LOCK_OFFSET] = 0xFC;
+ else
+ buf[ONENAND_OTP_LOCK_OFFSET] = 0xFC;
/*
* Write lock mark to 8th word of sector0 of page0 of the spare0.
* We write 16 bytes spare area instead of 2 bytes.
+ * For Flex-OneNAND, we write lock mark to 1st word of sector 4 of
+ * main area of page 49.
*/
+
from = 0;
- len = 16;
+ len = FLEXONENAND(this) ? mtd->writesize : 16;
- ret = onenand_otp_walk(mtd, from, len, &retlen, oob_buf, do_otp_lock, MTD_OTP_USER);
+ ret = onenand_otp_walk(mtd, from, len, &retlen, buf, do_otp_lock, MTD_OTP_USER);
return ret ? : retlen;
}
@@ -2542,6 +2939,14 @@ static void onenand_check_features(struct mtd_info *mtd)
break;
}
+ if (ONENAND_IS_MLC(this))
+ this->options &= ~ONENAND_HAS_2PLANE;
+
+ if (FLEXONENAND(this)) {
+ this->options &= ~ONENAND_HAS_CONT_LOCK;
+ this->options |= ONENAND_HAS_UNLOCK_ALL;
+ }
+
if (this->options & ONENAND_HAS_CONT_LOCK)
printk(KERN_DEBUG "Lock scheme is Continuous Lock\n");
if (this->options & ONENAND_HAS_UNLOCK_ALL)
@@ -2559,14 +2964,16 @@ static void onenand_check_features(struct mtd_info *mtd)
*/
static void onenand_print_device_info(int device, int version)
{
- int vcc, demuxed, ddp, density;
+ int vcc, demuxed, ddp, density, flexonenand;
vcc = device & ONENAND_DEVICE_VCC_MASK;
demuxed = device & ONENAND_DEVICE_IS_DEMUX;
ddp = device & ONENAND_DEVICE_IS_DDP;
density = onenand_get_density(device);
- printk(KERN_INFO "%sOneNAND%s %dMB %sV 16-bit (0x%02x)\n",
- demuxed ? "" : "Muxed ",
+ flexonenand = device & DEVICE_IS_FLEXONENAND;
+ printk(KERN_INFO "%s%sOneNAND%s %dMB %sV 16-bit (0x%02x)\n",
+ demuxed ? "" : "Muxed ",
+ flexonenand ? "Flex-" : "",
ddp ? "(DDP)" : "",
(16 << density),
vcc ? "2.65/3.3" : "1.8",
@@ -2576,6 +2983,7 @@ static void onenand_print_device_info(int device, int version)
static const struct onenand_manufacturers onenand_manuf_ids[] = {
{ONENAND_MFR_SAMSUNG, "Samsung"},
+ {ONENAND_MFR_NUMONYX, "Numonyx"},
};
/**
@@ -2605,6 +3013,261 @@ static int onenand_check_maf(int manuf)
}
/**
+ * flexonenand_get_boundary - Reads the SLC boundary
+ * @param mtd - MTD device structure
+ */
+static int flexonenand_get_boundary(struct mtd_info *mtd)
+{
+ struct onenand_chip *this = mtd->priv;
+ unsigned die, bdry;
+ int ret, syscfg, locked;
+
+ /* Disable ECC */
+ syscfg = this->read_word(this->base + ONENAND_REG_SYS_CFG1);
+ this->write_word((syscfg | 0x0100), this->base + ONENAND_REG_SYS_CFG1);
+
+ for (die = 0; die < this->dies; die++) {
+ this->command(mtd, FLEXONENAND_CMD_PI_ACCESS, die, 0);
+ this->wait(mtd, FL_SYNCING);
+
+ this->command(mtd, FLEXONENAND_CMD_READ_PI, die, 0);
+ ret = this->wait(mtd, FL_READING);
+
+ bdry = this->read_word(this->base + ONENAND_DATARAM);
+ if ((bdry >> FLEXONENAND_PI_UNLOCK_SHIFT) == 3)
+ locked = 0;
+ else
+ locked = 1;
+ this->boundary[die] = bdry & FLEXONENAND_PI_MASK;
+
+ this->command(mtd, ONENAND_CMD_RESET, 0, 0);
+ ret = this->wait(mtd, FL_RESETING);
+
+ printk(KERN_INFO "Die %d boundary: %d%s\n", die,
+ this->boundary[die], locked ? "(Locked)" : "(Unlocked)");
+ }
+
+ /* Enable ECC */
+ this->write_word(syscfg, this->base + ONENAND_REG_SYS_CFG1);
+ return 0;
+}
+
+/**
+ * flexonenand_get_size - Fill up fields in onenand_chip and mtd_info
+ * boundary[], diesize[], mtd->size, mtd->erasesize
+ * @param mtd - MTD device structure
+ */
+static void flexonenand_get_size(struct mtd_info *mtd)
+{
+ struct onenand_chip *this = mtd->priv;
+ int die, i, eraseshift, density;
+ int blksperdie, maxbdry;
+ loff_t ofs;
+
+ density = onenand_get_density(this->device_id);
+ blksperdie = ((loff_t)(16 << density) << 20) >> (this->erase_shift);
+ blksperdie >>= ONENAND_IS_DDP(this) ? 1 : 0;
+ maxbdry = blksperdie - 1;
+ eraseshift = this->erase_shift - 1;
+
+ mtd->numeraseregions = this->dies << 1;
+
+ /* This fills up the device boundary */
+ flexonenand_get_boundary(mtd);
+ die = ofs = 0;
+ i = -1;
+ for (; die < this->dies; die++) {
+ if (!die || this->boundary[die-1] != maxbdry) {
+ i++;
+ mtd->eraseregions[i].offset = ofs;
+ mtd->eraseregions[i].erasesize = 1 << eraseshift;
+ mtd->eraseregions[i].numblocks =
+ this->boundary[die] + 1;
+ ofs += mtd->eraseregions[i].numblocks << eraseshift;
+ eraseshift++;
+ } else {
+ mtd->numeraseregions -= 1;
+ mtd->eraseregions[i].numblocks +=
+ this->boundary[die] + 1;
+ ofs += (this->boundary[die] + 1) << (eraseshift - 1);
+ }
+ if (this->boundary[die] != maxbdry) {
+ i++;
+ mtd->eraseregions[i].offset = ofs;
+ mtd->eraseregions[i].erasesize = 1 << eraseshift;
+ mtd->eraseregions[i].numblocks = maxbdry ^
+ this->boundary[die];
+ ofs += mtd->eraseregions[i].numblocks << eraseshift;
+ eraseshift--;
+ } else
+ mtd->numeraseregions -= 1;
+ }
+
+ /* Expose MLC erase size except when all blocks are SLC */
+ mtd->erasesize = 1 << this->erase_shift;
+ if (mtd->numeraseregions == 1)
+ mtd->erasesize >>= 1;
+
+ printk(KERN_INFO "Device has %d eraseregions\n", mtd->numeraseregions);
+ for (i = 0; i < mtd->numeraseregions; i++)
+ printk(KERN_INFO "[offset: 0x%08x, erasesize: 0x%05x,"
+ " numblocks: %04u]\n",
+ (unsigned int) mtd->eraseregions[i].offset,
+ mtd->eraseregions[i].erasesize,
+ mtd->eraseregions[i].numblocks);
+
+ for (die = 0, mtd->size = 0; die < this->dies; die++) {
+ this->diesize[die] = (loff_t)blksperdie << this->erase_shift;
+ this->diesize[die] -= (loff_t)(this->boundary[die] + 1)
+ << (this->erase_shift - 1);
+ mtd->size += this->diesize[die];
+ }
+}
+
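A worked example of the layout this produces, with assumed geometry (values are not from the patch): one die, 512KiB MLC blocks (erase_shift = 19), 1024 blocks per die (maxbdry = 1023) and boundary[0] = 127:

region 0: offset 0x00000000, erasesize 0x40000 (256KiB SLC), numblocks 128
region 1: offset 0x02000000, erasesize 0x80000 (512KiB MLC), numblocks 1023 ^ 127 = 896

diesize = 1024 * 512KiB - 128 * 256KiB = 480MiB (32MiB of SLC plus 448MiB of MLC),
and mtd->erasesize stays at the MLC size because both regions exist.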
+/**
+ * flexonenand_check_blocks_erased - Check if blocks are erased
+ * @param mtd - MTD device structure
+ * @param start - first erase block to check
+ * @param end - last erase block to check
+ *
+ * Converting an unerased block from MLC to SLC
+ * causes byte values to change. Since both the data and its ECC
+ * have changed, reads on the block give an uncorrectable error.
+ * This might lead to the block being detected as bad.
+ *
+ * Avoid this by ensuring that the block to be converted is
+ * erased.
+ */
+static int flexonenand_check_blocks_erased(struct mtd_info *mtd, int start, int end)
+{
+ struct onenand_chip *this = mtd->priv;
+ int i, ret;
+ int block;
+ struct mtd_oob_ops ops = {
+ .mode = MTD_OOB_PLACE,
+ .ooboffs = 0,
+ .ooblen = mtd->oobsize,
+ .datbuf = NULL,
+ .oobbuf = this->oob_buf,
+ };
+ loff_t addr;
+
+ printk(KERN_DEBUG "Check blocks from %d to %d\n", start, end);
+
+ for (block = start; block <= end; block++) {
+ addr = flexonenand_addr(this, block);
+ if (onenand_block_isbad_nolock(mtd, addr, 0))
+ continue;
+
+ /*
+ * Since main area write results in ECC write to spare,
+ * it is sufficient to check only ECC bytes for change.
+ */
+ ret = onenand_read_oob_nolock(mtd, addr, &ops);
+ if (ret)
+ return ret;
+
+ for (i = 0; i < mtd->oobsize; i++)
+ if (this->oob_buf[i] != 0xff)
+ break;
+
+ if (i != mtd->oobsize) {
+ printk(KERN_WARNING "Block %d not erased.\n", block);
+ return 1;
+ }
+ }
+
+ return 0;
+}
+
+/**
+ * flexonenand_set_boundary - Writes the SLC boundary
+ * @param mtd - MTD device structure
+ * @param die - die whose boundary is being changed
+ * @param boundary - new SLC boundary block number (-1 for no change)
+ * @param lock - non-zero to lock the new boundary
+ */
+int flexonenand_set_boundary(struct mtd_info *mtd, int die,
+ int boundary, int lock)
+{
+ struct onenand_chip *this = mtd->priv;
+ int ret, density, blksperdie, old, new, thisboundary;
+ loff_t addr;
+
+ /* Change only once for SDP Flex-OneNAND */
+ if (die && (!ONENAND_IS_DDP(this)))
+ return 0;
+
+ /* boundary value of -1 indicates no required change */
+ if (boundary < 0 || boundary == this->boundary[die])
+ return 0;
+
+ density = onenand_get_density(this->device_id);
+ blksperdie = ((16 << density) << 20) >> this->erase_shift;
+ blksperdie >>= ONENAND_IS_DDP(this) ? 1 : 0;
+
+ if (boundary >= blksperdie) {
+ printk(KERN_ERR "flexonenand_set_boundary: Invalid boundary value. "
+ "Boundary not changed.\n");
+ return -EINVAL;
+ }
+
+ /* Check if converting blocks are erased */
+ old = this->boundary[die] + (die * this->density_mask);
+ new = boundary + (die * this->density_mask);
+ ret = flexonenand_check_blocks_erased(mtd, min(old, new) + 1, max(old, new));
+ if (ret) {
+ printk(KERN_ERR "flexonenand_set_boundary: Please erase blocks before boundary change\n");
+ return ret;
+ }
+
+ this->command(mtd, FLEXONENAND_CMD_PI_ACCESS, die, 0);
+ this->wait(mtd, FL_SYNCING);
+
+ /* Check if the boundary is locked */
+ this->command(mtd, FLEXONENAND_CMD_READ_PI, die, 0);
+ ret = this->wait(mtd, FL_READING);
+
+ thisboundary = this->read_word(this->base + ONENAND_DATARAM);
+ if ((thisboundary >> FLEXONENAND_PI_UNLOCK_SHIFT) != 3) {
+ printk(KERN_ERR "flexonenand_set_boundary: boundary locked\n");
+ ret = 1;
+ goto out;
+ }
+
+ printk(KERN_INFO "flexonenand_set_boundary: Changing die %d boundary: %d%s\n",
+ die, boundary, lock ? "(Locked)" : "(Unlocked)");
+
+ addr = die ? this->diesize[0] : 0;
+
+ boundary &= FLEXONENAND_PI_MASK;
+ boundary |= lock ? 0 : (3 << FLEXONENAND_PI_UNLOCK_SHIFT);
+
+ this->command(mtd, ONENAND_CMD_ERASE, addr, 0);
+ ret = this->wait(mtd, FL_ERASING);
+ if (ret) {
+ printk(KERN_ERR "flexonenand_set_boundary: Failed PI erase for Die %d\n", die);
+ goto out;
+ }
+
+ this->write_word(boundary, this->base + ONENAND_DATARAM);
+ this->command(mtd, ONENAND_CMD_PROG, addr, 0);
+ ret = this->wait(mtd, FL_WRITING);
+ if (ret) {
+ printk(KERN_ERR "flexonenand_set_boundary: Failed PI write for Die %d\n", die);
+ goto out;
+ }
+
+ this->command(mtd, FLEXONENAND_CMD_PI_UPDATE, die, 0);
+ ret = this->wait(mtd, FL_WRITING);
+out:
+ this->write_word(ONENAND_CMD_RESET, this->base + ONENAND_REG_COMMAND);
+ this->wait(mtd, FL_RESETING);
+ if (!ret)
+ /* Recalculate device size on boundary change*/
+ flexonenand_get_size(mtd);
+
+ return ret;
+}
+
+/**
* onenand_probe - [OneNAND Interface] Probe the OneNAND device
* @param mtd MTD device structure
*
@@ -2621,7 +3284,7 @@ static int onenand_probe(struct mtd_info *mtd)
/* Save system configuration 1 */
syscfg = this->read_word(this->base + ONENAND_REG_SYS_CFG1);
/* Clear Sync. Burst Read mode to read BootRAM */
- this->write_word((syscfg & ~ONENAND_SYS_CFG1_SYNC_READ), this->base + ONENAND_REG_SYS_CFG1);
+ this->write_word((syscfg & ~ONENAND_SYS_CFG1_SYNC_READ & ~ONENAND_SYS_CFG1_SYNC_WRITE), this->base + ONENAND_REG_SYS_CFG1);
/* Send the command for reading device ID from BootRAM */
this->write_word(ONENAND_CMD_READID, this->base + ONENAND_BOOTRAM);
@@ -2646,6 +3309,7 @@ static int onenand_probe(struct mtd_info *mtd)
maf_id = this->read_word(this->base + ONENAND_REG_MANUFACTURER_ID);
dev_id = this->read_word(this->base + ONENAND_REG_DEVICE_ID);
ver_id = this->read_word(this->base + ONENAND_REG_VERSION_ID);
+ this->technology = this->read_word(this->base + ONENAND_REG_TECHNOLOGY);
/* Check OneNAND device */
if (maf_id != bram_maf_id || dev_id != bram_dev_id)
@@ -2657,29 +3321,55 @@ static int onenand_probe(struct mtd_info *mtd)
this->version_id = ver_id;
density = onenand_get_density(dev_id);
+ if (FLEXONENAND(this)) {
+ this->dies = ONENAND_IS_DDP(this) ? 2 : 1;
+ /* Maximum possible erase regions */
+ mtd->numeraseregions = this->dies << 1;
+ mtd->eraseregions = kzalloc(sizeof(struct mtd_erase_region_info)
+ * (this->dies << 1), GFP_KERNEL);
+ if (!mtd->eraseregions)
+ return -ENOMEM;
+ }
+
+ /*
+ * For Flex-OneNAND, chipsize represents maximum possible device size.
+ * mtd->size represents the actual device size.
+ */
this->chipsize = (16 << density) << 20;
- /* Set density mask. it is used for DDP */
- if (ONENAND_IS_DDP(this))
- this->density_mask = (1 << (density + 6));
- else
- this->density_mask = 0;
/* OneNAND page size & block size */
/* The data buffer size is equal to page size */
mtd->writesize = this->read_word(this->base + ONENAND_REG_DATA_BUFFER_SIZE);
+ /* We use the full BufferRAM */
+ if (ONENAND_IS_MLC(this))
+ mtd->writesize <<= 1;
+
mtd->oobsize = mtd->writesize >> 5;
/* Pages per a block are always 64 in OneNAND */
mtd->erasesize = mtd->writesize << 6;
+ /*
+ * Flex-OneNAND SLC area has 64 pages per block.
+ * Flex-OneNAND MLC area has 128 pages per block.
+ * Expose MLC erase size to find erase_shift and page_mask.
+ */
+ if (FLEXONENAND(this))
+ mtd->erasesize <<= 1;
this->erase_shift = ffs(mtd->erasesize) - 1;
this->page_shift = ffs(mtd->writesize) - 1;
this->page_mask = (1 << (this->erase_shift - this->page_shift)) - 1;
+ /* Set density mask. it is used for DDP */
+ if (ONENAND_IS_DDP(this))
+ this->density_mask = this->chipsize >> (this->erase_shift + 1);
/* It's real page size */
this->writesize = mtd->writesize;
/* REVIST: Multichip handling */
- mtd->size = this->chipsize;
+ if (FLEXONENAND(this))
+ flexonenand_get_size(mtd);
+ else
+ mtd->size = this->chipsize;
/* Check OneNAND features */
onenand_check_features(mtd);
@@ -2734,7 +3424,7 @@ static void onenand_resume(struct mtd_info *mtd)
*/
int onenand_scan(struct mtd_info *mtd, int maxchips)
{
- int i;
+ int i, ret;
struct onenand_chip *this = mtd->priv;
if (!this->read_word)
@@ -2746,6 +3436,10 @@ int onenand_scan(struct mtd_info *mtd, int maxchips)
this->command = onenand_command;
if (!this->wait)
onenand_setup_wait(mtd);
+ if (!this->bbt_wait)
+ this->bbt_wait = onenand_bbt_wait;
+ if (!this->unlock_all)
+ this->unlock_all = onenand_unlock_all;
if (!this->read_bufferram)
this->read_bufferram = onenand_read_bufferram;
@@ -2796,6 +3490,10 @@ int onenand_scan(struct mtd_info *mtd, int maxchips)
* Allow subpage writes up to oobsize.
*/
switch (mtd->oobsize) {
+ case 128:
+ this->ecclayout = &onenand_oob_128;
+ mtd->subpage_sft = 0;
+ break;
case 64:
this->ecclayout = &onenand_oob_64;
mtd->subpage_sft = 2;
@@ -2859,9 +3557,18 @@ int onenand_scan(struct mtd_info *mtd, int maxchips)
mtd->owner = THIS_MODULE;
/* Unlock whole block */
- onenand_unlock_all(mtd);
+ this->unlock_all(mtd);
+
+ ret = this->scan_bbt(mtd);
+ if ((!FLEXONENAND(this)) || ret)
+ return ret;
- return this->scan_bbt(mtd);
+ /* Change Flex-OneNAND boundaries if required */
+ for (i = 0; i < MAX_DIES; i++)
+ flexonenand_set_boundary(mtd, i, flex_bdry[2 * i],
+ flex_bdry[(2 * i) + 1]);
+
+ return 0;
}
/**
@@ -2890,6 +3597,7 @@ void onenand_release(struct mtd_info *mtd)
kfree(this->page_buf);
if (this->options & ONENAND_OOBBUF_ALLOC)
kfree(this->oob_buf);
+ kfree(mtd->eraseregions);
}
EXPORT_SYMBOL_GPL(onenand_scan);
diff --git a/drivers/mtd/onenand/onenand_bbt.c b/drivers/mtd/onenand/onenand_bbt.c
index 2f53b51c680..a91fcac1af0 100644
--- a/drivers/mtd/onenand/onenand_bbt.c
+++ b/drivers/mtd/onenand/onenand_bbt.c
@@ -63,6 +63,7 @@ static int create_bbt(struct mtd_info *mtd, uint8_t *buf, struct nand_bbt_descr
loff_t from;
size_t readlen, ooblen;
struct mtd_oob_ops ops;
+ int rgn;
printk(KERN_INFO "Scanning device for bad blocks\n");
@@ -76,7 +77,7 @@ static int create_bbt(struct mtd_info *mtd, uint8_t *buf, struct nand_bbt_descr
/* Note that numblocks is 2 * (real numblocks) here;
* see i += 2 below as it makses shifting and masking less painful
*/
- numblocks = mtd->size >> (bbm->bbt_erase_shift - 1);
+ numblocks = this->chipsize >> (bbm->bbt_erase_shift - 1);
startblock = 0;
from = 0;
@@ -106,7 +107,12 @@ static int create_bbt(struct mtd_info *mtd, uint8_t *buf, struct nand_bbt_descr
}
}
i += 2;
- from += (1 << bbm->bbt_erase_shift);
+
+ if (FLEXONENAND(this)) {
+ rgn = flexonenand_region(mtd, from);
+ from += mtd->eraseregions[rgn].erasesize;
+ } else
+ from += (1 << bbm->bbt_erase_shift);
}
return 0;
@@ -143,7 +149,7 @@ static int onenand_isbad_bbt(struct mtd_info *mtd, loff_t offs, int allowbbt)
uint8_t res;
/* Get block number * 2 */
- block = (int) (offs >> (bbm->bbt_erase_shift - 1));
+ block = (int) (onenand_block(this, offs) << 1);
res = (bbm->bbt[block >> 3] >> (block & 0x06)) & 0x03;
DEBUG(MTD_DEBUG_LEVEL2, "onenand_isbad_bbt: bbt info for offs 0x%08x: (block %d) 0x%02x\n",
@@ -178,7 +184,7 @@ int onenand_scan_bbt(struct mtd_info *mtd, struct nand_bbt_descr *bd)
struct bbm_info *bbm = this->bbm;
int len, ret = 0;
- len = mtd->size >> (this->erase_shift + 2);
+ len = this->chipsize >> (this->erase_shift + 2);
/* Allocate memory (2 bits per block) and clear the memory bad block table */
bbm->bbt = kzalloc(len, GFP_KERNEL);
if (!bbm->bbt) {
diff --git a/drivers/mtd/onenand/onenand_sim.c b/drivers/mtd/onenand/onenand_sim.c
index d64200b7c94..f6e3c8aebd3 100644
--- a/drivers/mtd/onenand/onenand_sim.c
+++ b/drivers/mtd/onenand/onenand_sim.c
@@ -6,6 +6,10 @@
* Copyright © 2005-2007 Samsung Electronics
* Kyungmin Park <kyungmin.park@samsung.com>
*
+ * Vishak G <vishak.g at samsung.com>, Rohit Hagargundgi <h.rohit at samsung.com>
+ * Flex-OneNAND simulator support
+ * Copyright (C) Samsung Electronics, 2008
+ *
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
@@ -24,16 +28,38 @@
#ifndef CONFIG_ONENAND_SIM_MANUFACTURER
#define CONFIG_ONENAND_SIM_MANUFACTURER 0xec
#endif
+
#ifndef CONFIG_ONENAND_SIM_DEVICE_ID
#define CONFIG_ONENAND_SIM_DEVICE_ID 0x04
#endif
+
+#define CONFIG_FLEXONENAND ((CONFIG_ONENAND_SIM_DEVICE_ID >> 9) & 1)
+
#ifndef CONFIG_ONENAND_SIM_VERSION_ID
#define CONFIG_ONENAND_SIM_VERSION_ID 0x1e
#endif
+#ifndef CONFIG_ONENAND_SIM_TECHNOLOGY_ID
+#define CONFIG_ONENAND_SIM_TECHNOLOGY_ID CONFIG_FLEXONENAND
+#endif
+
+/* Initial boundary values for Flex-OneNAND Simulator */
+#ifndef CONFIG_FLEXONENAND_SIM_DIE0_BOUNDARY
+#define CONFIG_FLEXONENAND_SIM_DIE0_BOUNDARY 0x01
+#endif
+
+#ifndef CONFIG_FLEXONENAND_SIM_DIE1_BOUNDARY
+#define CONFIG_FLEXONENAND_SIM_DIE1_BOUNDARY 0x01
+#endif
+
static int manuf_id = CONFIG_ONENAND_SIM_MANUFACTURER;
static int device_id = CONFIG_ONENAND_SIM_DEVICE_ID;
static int version_id = CONFIG_ONENAND_SIM_VERSION_ID;
+static int technology_id = CONFIG_ONENAND_SIM_TECHNOLOGY_ID;
+static int boundary[] = {
+ CONFIG_FLEXONENAND_SIM_DIE0_BOUNDARY,
+ CONFIG_FLEXONENAND_SIM_DIE1_BOUNDARY,
+};
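Since CONFIG_FLEXONENAND is derived from bit 9 of the simulated device ID, building the simulator with, for example, CONFIG_ONENAND_SIM_DEVICE_ID=0x204 (0x04 with bit 9 set) would exercise the Flex-OneNAND paths, while the default 0x04 keeps plain OneNAND behaviour; the 0x204 value is only an illustrative assumption.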
struct onenand_flash {
void __iomem *base;
@@ -57,12 +83,18 @@ struct onenand_flash {
(writew(v, this->base + ONENAND_REG_WP_STATUS))
/* It has all 0xff chars */
-#define MAX_ONENAND_PAGESIZE (2048 + 64)
+#define MAX_ONENAND_PAGESIZE (4096 + 128)
static unsigned char *ffchars;
+#if CONFIG_FLEXONENAND
+#define PARTITION_NAME "Flex-OneNAND simulator partition"
+#else
+#define PARTITION_NAME "OneNAND simulator partition"
+#endif
+
static struct mtd_partition os_partitions[] = {
{
- .name = "OneNAND simulator partition",
+ .name = PARTITION_NAME,
.offset = 0,
.size = MTDPART_SIZ_FULL,
},
@@ -104,6 +136,7 @@ static void onenand_lock_handle(struct onenand_chip *this, int cmd)
switch (cmd) {
case ONENAND_CMD_UNLOCK:
+ case ONENAND_CMD_UNLOCK_ALL:
if (block_lock_scheme)
ONENAND_SET_WP_STATUS(ONENAND_WP_US, this);
else
@@ -228,10 +261,12 @@ static void onenand_data_handle(struct onenand_chip *this, int cmd,
{
struct mtd_info *mtd = &info->mtd;
struct onenand_flash *flash = this->priv;
- int main_offset, spare_offset;
+ int main_offset, spare_offset, die = 0;
void __iomem *src;
void __iomem *dest;
unsigned int i;
+ static int pi_operation;
+ int erasesize, rgn;
if (dataram) {
main_offset = mtd->writesize;
@@ -241,10 +276,27 @@ static void onenand_data_handle(struct onenand_chip *this, int cmd,
spare_offset = 0;
}
+ if (pi_operation) {
+ die = readw(this->base + ONENAND_REG_START_ADDRESS2);
+ die >>= ONENAND_DDP_SHIFT;
+ }
+
switch (cmd) {
+ case FLEXONENAND_CMD_PI_ACCESS:
+ pi_operation = 1;
+ break;
+
+ case ONENAND_CMD_RESET:
+ pi_operation = 0;
+ break;
+
case ONENAND_CMD_READ:
src = ONENAND_CORE(flash) + offset;
dest = ONENAND_MAIN_AREA(this, main_offset);
+ if (pi_operation) {
+ writew(boundary[die], this->base + ONENAND_DATARAM);
+ break;
+ }
memcpy(dest, src, mtd->writesize);
/* Fall through */
@@ -257,6 +309,10 @@ static void onenand_data_handle(struct onenand_chip *this, int cmd,
case ONENAND_CMD_PROG:
src = ONENAND_MAIN_AREA(this, main_offset);
dest = ONENAND_CORE(flash) + offset;
+ if (pi_operation) {
+ boundary[die] = readw(this->base + ONENAND_DATARAM);
+ break;
+ }
/* To handle partial write */
for (i = 0; i < (1 << mtd->subpage_sft); i++) {
int off = i * this->subpagesize;
@@ -284,9 +340,18 @@ static void onenand_data_handle(struct onenand_chip *this, int cmd,
break;
case ONENAND_CMD_ERASE:
- memset(ONENAND_CORE(flash) + offset, 0xff, mtd->erasesize);
+ if (pi_operation)
+ break;
+
+ if (FLEXONENAND(this)) {
+ rgn = flexonenand_region(mtd, offset);
+ erasesize = mtd->eraseregions[rgn].erasesize;
+ } else
+ erasesize = mtd->erasesize;
+
+ memset(ONENAND_CORE(flash) + offset, 0xff, erasesize);
memset(ONENAND_CORE_SPARE(flash, this, offset), 0xff,
- (mtd->erasesize >> 5));
+ (erasesize >> 5));
break;
default:
@@ -339,7 +404,7 @@ static void onenand_command_handle(struct onenand_chip *this, int cmd)
}
if (block != -1)
- offset += block << this->erase_shift;
+ offset = onenand_addr(this, block);
if (page != -1)
offset += page << this->page_shift;
@@ -390,6 +455,7 @@ static int __init flash_init(struct onenand_flash *flash)
}
density = device_id >> ONENAND_DEVICE_DENSITY_SHIFT;
+ density &= ONENAND_DEVICE_DENSITY_MASK;
size = ((16 << 20) << density);
ONENAND_CORE(flash) = vmalloc(size + (size >> 5));
@@ -405,8 +471,9 @@ static int __init flash_init(struct onenand_flash *flash)
writew(manuf_id, flash->base + ONENAND_REG_MANUFACTURER_ID);
writew(device_id, flash->base + ONENAND_REG_DEVICE_ID);
writew(version_id, flash->base + ONENAND_REG_VERSION_ID);
+ writew(technology_id, flash->base + ONENAND_REG_TECHNOLOGY);
- if (density < 2)
+ if (density < 2 && (!CONFIG_FLEXONENAND))
buffer_size = 0x0400; /* 1KiB page */
else
buffer_size = 0x0800; /* 2KiB page */
diff --git a/drivers/mtd/ubi/Kconfig b/drivers/mtd/ubi/Kconfig
index 3f063108e95..b1cd7a1a219 100644
--- a/drivers/mtd/ubi/Kconfig
+++ b/drivers/mtd/ubi/Kconfig
@@ -49,15 +49,16 @@ config MTD_UBI_BEB_RESERVE
reserved. Leave the default value if unsure.
config MTD_UBI_GLUEBI
- bool "Emulate MTD devices"
+ tristate "MTD devices emulation driver (gluebi)"
default n
depends on MTD_UBI
help
- This option enables MTD devices emulation on top of UBI volumes: for
- each UBI volumes an MTD device is created, and all I/O to this MTD
- device is redirected to the UBI volume. This is handy to make
- MTD-oriented software (like JFFS2) work on top of UBI. Do not enable
- this if no legacy software will be used.
+ This option enables gluebi - an additional driver which emulates MTD
+ devices on top of UBI volumes: for each UBI volume an MTD device is
+ created, and all I/O to this MTD device is redirected to the UBI
+ volume. This is handy to make MTD-oriented software (like JFFS2)
+ work on top of UBI. Do not enable this unless you use legacy
+ software.
source "drivers/mtd/ubi/Kconfig.debug"
endmenu
diff --git a/drivers/mtd/ubi/Makefile b/drivers/mtd/ubi/Makefile
index dd834e04151..c9302a5452b 100644
--- a/drivers/mtd/ubi/Makefile
+++ b/drivers/mtd/ubi/Makefile
@@ -4,4 +4,4 @@ ubi-y += vtbl.o vmt.o upd.o build.o cdev.o kapi.o eba.o io.o wl.o scan.o
ubi-y += misc.o
ubi-$(CONFIG_MTD_UBI_DEBUG) += debug.o
-ubi-$(CONFIG_MTD_UBI_GLUEBI) += gluebi.o
+obj-$(CONFIG_MTD_UBI_GLUEBI) += gluebi.o
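With the option now tristate and gluebi built through obj- rather than linked into ubi.o, gluebi can be configured as a standalone module (CONFIG_MTD_UBI_GLUEBI=m yields gluebi.ko); when loaded it picks up already existing volumes through the notifier registration shown later in this series, since it registers with ignore_existing set to zero.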
diff --git a/drivers/mtd/ubi/build.c b/drivers/mtd/ubi/build.c
index 4048db83aef..e1f7d0a78b9 100644
--- a/drivers/mtd/ubi/build.c
+++ b/drivers/mtd/ubi/build.c
@@ -41,6 +41,7 @@
#include <linux/miscdevice.h>
#include <linux/log2.h>
#include <linux/kthread.h>
+#include <linux/reboot.h>
#include "ubi.h"
/* Maximum length of the 'mtd=' parameter */
@@ -122,6 +123,94 @@ static struct device_attribute dev_mtd_num =
__ATTR(mtd_num, S_IRUGO, dev_attribute_show, NULL);
/**
+ * ubi_volume_notify - send a volume change notification.
+ * @ubi: UBI device description object
+ * @vol: volume description object of the changed volume
+ * @ntype: notification type to send (%UBI_VOLUME_ADDED, etc)
+ *
+ * This is a helper function which notifies all subscribers about a volume
+ * change event (creation, removal, re-sizing, re-naming, updating). Returns
+ * zero in case of success and a negative error code in case of failure.
+ */
+int ubi_volume_notify(struct ubi_device *ubi, struct ubi_volume *vol, int ntype)
+{
+ struct ubi_notification nt;
+
+ ubi_do_get_device_info(ubi, &nt.di);
+ ubi_do_get_volume_info(ubi, vol, &nt.vi);
+ return blocking_notifier_call_chain(&ubi_notifiers, ntype, &nt);
+}
+
+/**
+ * ubi_notify_all - send a notification to all volumes.
+ * @ubi: UBI device description object
+ * @ntype: notification type to send (%UBI_VOLUME_ADDED, etc)
+ * @nb: the notifier to call
+ *
+ * This function walks all volumes of UBI device @ubi and sends the @ntype
+ * notification for each volume. If @nb is %NULL, then all registered notifiers
+ * are called, otherwise only the @nb notifier is called. Returns the number of
+ * sent notifications.
+ */
+int ubi_notify_all(struct ubi_device *ubi, int ntype, struct notifier_block *nb)
+{
+ struct ubi_notification nt;
+ int i, count = 0;
+
+ ubi_do_get_device_info(ubi, &nt.di);
+
+ mutex_lock(&ubi->device_mutex);
+ for (i = 0; i < ubi->vtbl_slots; i++) {
+ * Since the @ubi->device_mutex is locked, and we are not going to
+ * Since the @ubi->device is locked, and we are not going to
+ * change @ubi->volumes, we do not have to lock
+ * @ubi->volumes_lock.
+ */
+ if (!ubi->volumes[i])
+ continue;
+
+ ubi_do_get_volume_info(ubi, ubi->volumes[i], &nt.vi);
+ if (nb)
+ nb->notifier_call(nb, ntype, &nt);
+ else
+ blocking_notifier_call_chain(&ubi_notifiers, ntype,
+ &nt);
+ count += 1;
+ }
+ mutex_unlock(&ubi->device_mutex);
+
+ return count;
+}
+
+/**
+ * ubi_enumerate_volumes - send "add" notification for all existing volumes.
+ * @nb: the notifier to call
+ *
+ * This function walks all UBI devices and volumes and sends the
+ * %UBI_VOLUME_ADDED notification for each volume. If @nb is %NULL, then all
+ * registered notifiers are called, otherwise only the @nb notifier is called.
+ * Returns the number of sent notifications.
+ */
+int ubi_enumerate_volumes(struct notifier_block *nb)
+{
+ int i, count = 0;
+
+ /*
+ * Since the @ubi_devices_mutex is locked, and we are not going to
+ * change @ubi_devices, we do not have to lock @ubi_devices_lock.
+ */
+ for (i = 0; i < UBI_MAX_DEVICES; i++) {
+ struct ubi_device *ubi = ubi_devices[i];
+
+ if (!ubi)
+ continue;
+ count += ubi_notify_all(ubi, UBI_VOLUME_ADDED, nb);
+ }
+
+ return count;
+}
+
+/**
* ubi_get_device - get UBI device.
* @ubi_num: UBI device number
*
@@ -380,7 +469,7 @@ static void free_user_volumes(struct ubi_device *ubi)
* @ubi: UBI device description object
*
* This function returns zero in case of success and a negative error code in
- * case of failure. Note, this function destroys all volumes if it failes.
+ * case of failure. Note, this function destroys all volumes if it fails.
*/
static int uif_init(struct ubi_device *ubi)
{
@@ -568,6 +657,11 @@ static int io_init(struct ubi_device *ubi)
if (ubi->mtd->block_isbad && ubi->mtd->block_markbad)
ubi->bad_allowed = 1;
+ if (ubi->mtd->type == MTD_NORFLASH) {
+ ubi_assert(ubi->mtd->writesize == 1);
+ ubi->nor_flash = 1;
+ }
+
ubi->min_io_size = ubi->mtd->writesize;
ubi->hdrs_min_io_size = ubi->mtd->writesize >> ubi->mtd->subpage_sft;
@@ -633,6 +727,15 @@ static int io_init(struct ubi_device *ubi)
}
/*
+ * Set the maximum number of erroneous physical eraseblocks to 10%.
+ * Erroneous PEBs are those which have read errors.
+ */
+ ubi->max_erroneous = ubi->peb_count / 10;
+ if (ubi->max_erroneous < 16)
+ ubi->max_erroneous = 16;
+ dbg_msg("max_erroneous %d", ubi->max_erroneous);
+
+ /*
* It may happen that EC and VID headers are situated in one minimal
* I/O unit. In this case we can only accept this UBI image in
* read-only mode.
@@ -726,6 +829,34 @@ static int autoresize(struct ubi_device *ubi, int vol_id)
}
/**
+ * ubi_reboot_notifier - halt UBI transactions immediately prior to a reboot.
+ * @n: reboot notifier object
+ * @state: SYS_RESTART, SYS_HALT, or SYS_POWER_OFF
+ * @cmd: pointer to command string for RESTART2
+ *
+ * This function stops the UBI background thread so that the flash device
+ * remains quiescent when Linux restarts the system. Any queued work will be
+ * discarded, but this function will block until do_work() finishes if an
+ * operation is already in progress.
+ *
+ * This function solves a real-life problem observed on NOR flashes when a
+ * PEB erase operation starts, then the system is rebooted before the erase
+ * finishes, and the boot loader gets confused and dies. So we prefer to finish
+ * the ongoing operation before rebooting.
+ */
+static int ubi_reboot_notifier(struct notifier_block *n, unsigned long state,
+ void *cmd)
+{
+ struct ubi_device *ubi;
+
+ ubi = container_of(n, struct ubi_device, reboot_notifier);
+ if (ubi->bgt_thread)
+ kthread_stop(ubi->bgt_thread);
+ ubi_sync(ubi->ubi_num);
+ return NOTIFY_DONE;
+}
+
+/**
* ubi_attach_mtd_dev - attach an MTD device.
* @mtd: MTD device description object
* @ubi_num: number to assign to the new UBI device
@@ -806,8 +937,7 @@ int ubi_attach_mtd_dev(struct mtd_info *mtd, int ubi_num, int vid_hdr_offset)
mutex_init(&ubi->buf_mutex);
mutex_init(&ubi->ckvol_mutex);
- mutex_init(&ubi->mult_mutex);
- mutex_init(&ubi->volumes_mutex);
+ mutex_init(&ubi->device_mutex);
spin_lock_init(&ubi->volumes_lock);
ubi_msg("attaching mtd%d to ubi%d", mtd->index, ubi_num);
@@ -825,7 +955,7 @@ int ubi_attach_mtd_dev(struct mtd_info *mtd, int ubi_num, int vid_hdr_offset)
if (!ubi->peb_buf2)
goto out_free;
-#ifdef CONFIG_MTD_UBI_DEBUG
+#ifdef CONFIG_MTD_UBI_DEBUG_PARANOID
mutex_init(&ubi->dbg_buf_mutex);
ubi->dbg_peb_buf = vmalloc(ubi->peb_size);
if (!ubi->dbg_peb_buf)
@@ -871,12 +1001,25 @@ int ubi_attach_mtd_dev(struct mtd_info *mtd, int ubi_num, int vid_hdr_offset)
ubi_msg("number of PEBs reserved for bad PEB handling: %d",
ubi->beb_rsvd_pebs);
ubi_msg("max/mean erase counter: %d/%d", ubi->max_ec, ubi->mean_ec);
+ ubi_msg("image sequence number: %d", ubi->image_seq);
+ /*
+ * The below lock makes sure we do not race with 'ubi_thread()' which
+ * checks @ubi->thread_enabled. Otherwise we may fail to wake it up.
+ */
+ spin_lock(&ubi->wl_lock);
if (!DBG_DISABLE_BGT)
ubi->thread_enabled = 1;
wake_up_process(ubi->bgt_thread);
+ spin_unlock(&ubi->wl_lock);
+
+ /* Flash device priority is 0 - UBI needs to shut down first */
+ ubi->reboot_notifier.priority = 1;
+ ubi->reboot_notifier.notifier_call = ubi_reboot_notifier;
+ register_reboot_notifier(&ubi->reboot_notifier);
ubi_devices[ubi_num] = ubi;
+ ubi_notify_all(ubi, UBI_VOLUME_ADDED, NULL);
return ubi_num;
out_uif:
@@ -892,7 +1035,7 @@ out_detach:
out_free:
vfree(ubi->peb_buf1);
vfree(ubi->peb_buf2);
-#ifdef CONFIG_MTD_UBI_DEBUG
+#ifdef CONFIG_MTD_UBI_DEBUG_PARANOID
vfree(ubi->dbg_peb_buf);
#endif
kfree(ubi);
@@ -919,13 +1062,13 @@ int ubi_detach_mtd_dev(int ubi_num, int anyway)
if (ubi_num < 0 || ubi_num >= UBI_MAX_DEVICES)
return -EINVAL;
- spin_lock(&ubi_devices_lock);
- ubi = ubi_devices[ubi_num];
- if (!ubi) {
- spin_unlock(&ubi_devices_lock);
+ ubi = ubi_get_device(ubi_num);
+ if (!ubi)
return -EINVAL;
- }
+ spin_lock(&ubi_devices_lock);
+ put_device(&ubi->dev);
+ ubi->ref_count -= 1;
if (ubi->ref_count) {
if (!anyway) {
spin_unlock(&ubi_devices_lock);
@@ -939,12 +1082,14 @@ int ubi_detach_mtd_dev(int ubi_num, int anyway)
spin_unlock(&ubi_devices_lock);
ubi_assert(ubi_num == ubi->ubi_num);
+ ubi_notify_all(ubi, UBI_VOLUME_REMOVED, NULL);
dbg_msg("detaching mtd%d from ubi%d", ubi->mtd->index, ubi_num);
/*
* Before freeing anything, we have to stop the background thread to
* prevent it from doing anything on this device while we are freeing.
*/
+ unregister_reboot_notifier(&ubi->reboot_notifier);
if (ubi->bgt_thread)
kthread_stop(ubi->bgt_thread);
@@ -961,7 +1106,7 @@ int ubi_detach_mtd_dev(int ubi_num, int anyway)
put_mtd_device(ubi->mtd);
vfree(ubi->peb_buf1);
vfree(ubi->peb_buf2);
-#ifdef CONFIG_MTD_UBI_DEBUG
+#ifdef CONFIG_MTD_UBI_DEBUG_PARANOID
vfree(ubi->dbg_peb_buf);
#endif
ubi_msg("mtd%d is detached from ubi%d", ubi->mtd->index, ubi->ubi_num);
diff --git a/drivers/mtd/ubi/cdev.c b/drivers/mtd/ubi/cdev.c
index f8e0f68f218..f237ddbb271 100644
--- a/drivers/mtd/ubi/cdev.c
+++ b/drivers/mtd/ubi/cdev.c
@@ -113,7 +113,8 @@ static int vol_cdev_open(struct inode *inode, struct file *file)
else
mode = UBI_READONLY;
- dbg_gen("open volume %d, mode %d", vol_id, mode);
+ dbg_gen("open device %d, volume %d, mode %d",
+ ubi_num, vol_id, mode);
desc = ubi_open_volume(ubi_num, vol_id, mode);
if (IS_ERR(desc))
@@ -128,7 +129,8 @@ static int vol_cdev_release(struct inode *inode, struct file *file)
struct ubi_volume_desc *desc = file->private_data;
struct ubi_volume *vol = desc->vol;
- dbg_gen("release volume %d, mode %d", vol->vol_id, desc->mode);
+ dbg_gen("release device %d, volume %d, mode %d",
+ vol->ubi->ubi_num, vol->vol_id, desc->mode);
if (vol->updating) {
ubi_warn("update of volume %d not finished, volume is damaged",
@@ -393,7 +395,7 @@ static ssize_t vol_cdev_write(struct file *file, const char __user *buf,
vol->corrupted = 1;
}
vol->checked = 1;
- ubi_gluebi_updated(vol);
+ ubi_volume_notify(ubi, vol, UBI_VOLUME_UPDATED);
revoke_exclusive(desc, UBI_READWRITE);
}
@@ -558,7 +560,7 @@ static long vol_cdev_ioctl(struct file *file, unsigned int cmd,
break;
}
- /* Set volume property command*/
+ /* Set volume property command */
case UBI_IOCSETPROP:
{
struct ubi_set_prop_req req;
@@ -571,9 +573,9 @@ static long vol_cdev_ioctl(struct file *file, unsigned int cmd,
}
switch (req.property) {
case UBI_PROP_DIRECT_WRITE:
- mutex_lock(&ubi->volumes_mutex);
+ mutex_lock(&ubi->device_mutex);
desc->vol->direct_writes = !!req.value;
- mutex_unlock(&ubi->volumes_mutex);
+ mutex_unlock(&ubi->device_mutex);
break;
default:
err = -EINVAL;
@@ -810,9 +812,9 @@ static int rename_volumes(struct ubi_device *ubi,
re->desc->vol->vol_id, re->desc->vol->name);
}
- mutex_lock(&ubi->volumes_mutex);
+ mutex_lock(&ubi->device_mutex);
err = ubi_rename_volumes(ubi, &rename_list);
- mutex_unlock(&ubi->volumes_mutex);
+ mutex_unlock(&ubi->device_mutex);
out_free:
list_for_each_entry_safe(re, re1, &rename_list, list) {
@@ -856,9 +858,9 @@ static long ubi_cdev_ioctl(struct file *file, unsigned int cmd,
if (err)
break;
- mutex_lock(&ubi->volumes_mutex);
+ mutex_lock(&ubi->device_mutex);
err = ubi_create_volume(ubi, &req);
- mutex_unlock(&ubi->volumes_mutex);
+ mutex_unlock(&ubi->device_mutex);
if (err)
break;
@@ -887,9 +889,9 @@ static long ubi_cdev_ioctl(struct file *file, unsigned int cmd,
break;
}
- mutex_lock(&ubi->volumes_mutex);
+ mutex_lock(&ubi->device_mutex);
err = ubi_remove_volume(desc, 0);
- mutex_unlock(&ubi->volumes_mutex);
+ mutex_unlock(&ubi->device_mutex);
/*
* The volume is deleted (unless an error occurred), and the
@@ -926,9 +928,9 @@ static long ubi_cdev_ioctl(struct file *file, unsigned int cmd,
pebs = div_u64(req.bytes + desc->vol->usable_leb_size - 1,
desc->vol->usable_leb_size);
- mutex_lock(&ubi->volumes_mutex);
+ mutex_lock(&ubi->device_mutex);
err = ubi_resize_volume(desc, pebs);
- mutex_unlock(&ubi->volumes_mutex);
+ mutex_unlock(&ubi->device_mutex);
ubi_close_volume(desc);
break;
}
@@ -952,9 +954,7 @@ static long ubi_cdev_ioctl(struct file *file, unsigned int cmd,
break;
}
- mutex_lock(&ubi->mult_mutex);
err = rename_volumes(ubi, req);
- mutex_unlock(&ubi->mult_mutex);
kfree(req);
break;
}
diff --git a/drivers/mtd/ubi/debug.c b/drivers/mtd/ubi/debug.c
index c0ed60e8ade..54b0186915f 100644
--- a/drivers/mtd/ubi/debug.c
+++ b/drivers/mtd/ubi/debug.c
@@ -44,6 +44,8 @@ void ubi_dbg_dump_ec_hdr(const struct ubi_ec_hdr *ec_hdr)
be32_to_cpu(ec_hdr->vid_hdr_offset));
printk(KERN_DEBUG "\tdata_offset %d\n",
be32_to_cpu(ec_hdr->data_offset));
+ printk(KERN_DEBUG "\timage_seq %d\n",
+ be32_to_cpu(ec_hdr->image_seq));
printk(KERN_DEBUG "\thdr_crc %#08x\n",
be32_to_cpu(ec_hdr->hdr_crc));
printk(KERN_DEBUG "erase counter header hexdump:\n");
diff --git a/drivers/mtd/ubi/debug.h b/drivers/mtd/ubi/debug.h
index 13777e5beac..a4da7a09b94 100644
--- a/drivers/mtd/ubi/debug.h
+++ b/drivers/mtd/ubi/debug.h
@@ -93,6 +93,12 @@ void ubi_dbg_dump_mkvol_req(const struct ubi_mkvol_req *req);
#define UBI_IO_DEBUG 0
#endif
+#ifdef CONFIG_MTD_UBI_DEBUG_PARANOID
+int ubi_dbg_check_all_ff(struct ubi_device *ubi, int pnum, int offset, int len);
+#else
+#define ubi_dbg_check_all_ff(ubi, pnum, offset, len) 0
+#endif
+
#ifdef CONFIG_MTD_UBI_DEBUG_DISABLE_BGT
#define DBG_DISABLE_BGT 1
#else
@@ -167,6 +173,7 @@ static inline int ubi_dbg_is_erase_failure(void)
#define ubi_dbg_is_bitflip() 0
#define ubi_dbg_is_write_failure() 0
#define ubi_dbg_is_erase_failure() 0
+#define ubi_dbg_check_all_ff(ubi, pnum, offset, len) 0
#endif /* !CONFIG_MTD_UBI_DEBUG */
#endif /* !__UBI_DEBUG_H__ */
diff --git a/drivers/mtd/ubi/eba.c b/drivers/mtd/ubi/eba.c
index 25def348e5b..e4d9ef0c965 100644
--- a/drivers/mtd/ubi/eba.c
+++ b/drivers/mtd/ubi/eba.c
@@ -419,8 +419,9 @@ retry:
* not implemented.
*/
if (err == UBI_IO_BAD_VID_HDR) {
- ubi_warn("bad VID header at PEB %d, LEB"
- "%d:%d", pnum, vol_id, lnum);
+ ubi_warn("corrupted VID header at PEB "
+ "%d, LEB %d:%d", pnum, vol_id,
+ lnum);
err = -EBADMSG;
} else
ubi_ro_mode(ubi);
@@ -940,6 +941,33 @@ write_error:
}
/**
+ * is_error_sane - check whether a read error is sane.
+ * @err: code of the error happened during reading
+ *
+ * This is a helper function for 'ubi_eba_copy_leb()' which is called when we
+ * cannot read data from the target PEB (an error @err happened). If the error
+ * code is sane, then we treat this error as non-fatal. Otherwise the error is
+ * fatal and UBI will be switched to R/O mode later.
+ *
+ * The idea is that we try not to switch to R/O mode if the read error is
+ * something which suggests there was a real read problem. E.g., %-EIO. Or a
+ * memory allocation failed (%-ENOMEM). Otherwise, it is safer to switch to R/O
+ * mode, simply because we do not know what happened at the MTD level, and we
+ * cannot handle this. E.g., the underlying driver may have become crazy, and
+ * it is safer to switch to R/O mode to preserve the data.
+ *
+ * And bear in mind, this is about reading from the target PEB, i.e. the PEB
+ * which we have just written.
+ */
+static int is_error_sane(int err)
+{
+ if (err == -EIO || err == -ENOMEM || err == UBI_IO_BAD_VID_HDR ||
+ err == -ETIMEDOUT)
+ return 0;
+ return 1;
+}
+
+/**
* ubi_eba_copy_leb - copy logical eraseblock.
* @ubi: UBI device description object
* @from: physical eraseblock number from where to copy
@@ -950,12 +978,7 @@ write_error:
* physical eraseblock @to. The @vid_hdr buffer may be changed by this
* function. Returns:
* o %0 in case of success;
- * o %1 if the operation was canceled because the volume is being deleted
- * or because the PEB was put meanwhile;
- * o %2 if the operation was canceled because there was a write error to the
- * target PEB;
- * o %-EAGAIN if the operation was canceled because a bit-flip was detected
- * in the target PEB;
+ * o %MOVE_CANCEL_RACE, %MOVE_TARGET_WR_ERR, %MOVE_CANCEL_BITFLIPS, etc;
* o a negative error code in case of failure.
*/
int ubi_eba_copy_leb(struct ubi_device *ubi, int from, int to,
@@ -968,7 +991,7 @@ int ubi_eba_copy_leb(struct ubi_device *ubi, int from, int to,
vol_id = be32_to_cpu(vid_hdr->vol_id);
lnum = be32_to_cpu(vid_hdr->lnum);
- dbg_eba("copy LEB %d:%d, PEB %d to PEB %d", vol_id, lnum, from, to);
+ dbg_wl("copy LEB %d:%d, PEB %d to PEB %d", vol_id, lnum, from, to);
if (vid_hdr->vol_type == UBI_VID_STATIC) {
data_size = be32_to_cpu(vid_hdr->data_size);
@@ -986,13 +1009,12 @@ int ubi_eba_copy_leb(struct ubi_device *ubi, int from, int to,
* be locked in 'ubi_wl_put_peb()' and wait for the WL worker to finish.
*/
vol = ubi->volumes[idx];
+ spin_unlock(&ubi->volumes_lock);
if (!vol) {
/* No need to do further work, cancel */
- dbg_eba("volume %d is being removed, cancel", vol_id);
- spin_unlock(&ubi->volumes_lock);
- return 1;
+ dbg_wl("volume %d is being removed, cancel", vol_id);
+ return MOVE_CANCEL_RACE;
}
- spin_unlock(&ubi->volumes_lock);
/*
* We do not want anybody to write to this logical eraseblock while we
@@ -1004,12 +1026,13 @@ int ubi_eba_copy_leb(struct ubi_device *ubi, int from, int to,
* (@from). This task locks the LEB and goes sleep in the
* 'ubi_wl_put_peb()' function on the @ubi->move_mutex. In turn, we are
* holding @ubi->move_mutex and go sleep on the LEB lock. So, if the
- * LEB is already locked, we just do not move it and return %1.
+ * LEB is already locked, we just do not move it and return
+ * %MOVE_CANCEL_RACE, which means that UBI will re-try, but later.
*/
err = leb_write_trylock(ubi, vol_id, lnum);
if (err) {
- dbg_eba("contention on LEB %d:%d, cancel", vol_id, lnum);
- return err;
+ dbg_wl("contention on LEB %d:%d, cancel", vol_id, lnum);
+ return MOVE_CANCEL_RACE;
}
/*
@@ -1018,25 +1041,26 @@ int ubi_eba_copy_leb(struct ubi_device *ubi, int from, int to,
* cancel it.
*/
if (vol->eba_tbl[lnum] != from) {
- dbg_eba("LEB %d:%d is no longer mapped to PEB %d, mapped to "
- "PEB %d, cancel", vol_id, lnum, from,
- vol->eba_tbl[lnum]);
- err = 1;
+ dbg_wl("LEB %d:%d is no longer mapped to PEB %d, mapped to "
+ "PEB %d, cancel", vol_id, lnum, from,
+ vol->eba_tbl[lnum]);
+ err = MOVE_CANCEL_RACE;
goto out_unlock_leb;
}
/*
* OK, now the LEB is locked and we can safely start moving it. Since
- * this function utilizes the @ubi->peb1_buf buffer which is shared
- * with some other functions, so lock the buffer by taking the
+ * this function utilizes the @ubi->peb_buf1 buffer which is shared
+ * with some other functions - we lock the buffer by taking the
* @ubi->buf_mutex.
*/
mutex_lock(&ubi->buf_mutex);
- dbg_eba("read %d bytes of data", aldata_size);
+ dbg_wl("read %d bytes of data", aldata_size);
err = ubi_io_read_data(ubi, ubi->peb_buf1, from, 0, aldata_size);
if (err && err != UBI_IO_BITFLIPS) {
ubi_warn("error %d while reading data from PEB %d",
err, from);
+ err = MOVE_SOURCE_RD_ERR;
goto out_unlock_buf;
}
@@ -1059,7 +1083,7 @@ int ubi_eba_copy_leb(struct ubi_device *ubi, int from, int to,
cond_resched();
/*
- * It may turn out to me that the whole @from physical eraseblock
+ * It may turn out to be that the whole @from physical eraseblock
* contains only 0xFF bytes. Then we have to only write the VID header
* and do not write any data. This also means we should not set
* @vid_hdr->copy_flag, @vid_hdr->data_size, and @vid_hdr->data_crc.
@@ -1074,7 +1098,7 @@ int ubi_eba_copy_leb(struct ubi_device *ubi, int from, int to,
err = ubi_io_write_vid_hdr(ubi, to, vid_hdr);
if (err) {
if (err == -EIO)
- err = 2;
+ err = MOVE_TARGET_WR_ERR;
goto out_unlock_buf;
}
@@ -1083,10 +1107,13 @@ int ubi_eba_copy_leb(struct ubi_device *ubi, int from, int to,
/* Read the VID header back and check if it was written correctly */
err = ubi_io_read_vid_hdr(ubi, to, vid_hdr, 1);
if (err) {
- if (err != UBI_IO_BITFLIPS)
- ubi_warn("cannot read VID header back from PEB %d", to);
- else
- err = -EAGAIN;
+ if (err != UBI_IO_BITFLIPS) {
+ ubi_warn("error %d while reading VID header back from "
+ "PEB %d", err, to);
+ if (is_error_sane(err))
+ err = MOVE_TARGET_RD_ERR;
+ } else
+ err = MOVE_CANCEL_BITFLIPS;
goto out_unlock_buf;
}
@@ -1094,7 +1121,7 @@ int ubi_eba_copy_leb(struct ubi_device *ubi, int from, int to,
err = ubi_io_write_data(ubi, ubi->peb_buf1, to, 0, aldata_size);
if (err) {
if (err == -EIO)
- err = 2;
+ err = MOVE_TARGET_WR_ERR;
goto out_unlock_buf;
}
@@ -1107,11 +1134,13 @@ int ubi_eba_copy_leb(struct ubi_device *ubi, int from, int to,
err = ubi_io_read_data(ubi, ubi->peb_buf2, to, 0, aldata_size);
if (err) {
- if (err != UBI_IO_BITFLIPS)
- ubi_warn("cannot read data back from PEB %d",
- to);
- else
- err = -EAGAIN;
+ if (err != UBI_IO_BITFLIPS) {
+ ubi_warn("error %d while reading data back "
+ "from PEB %d", err, to);
+ if (is_error_sane(err))
+ err = MOVE_TARGET_RD_ERR;
+ } else
+ err = MOVE_CANCEL_BITFLIPS;
goto out_unlock_buf;
}
@@ -1225,6 +1254,7 @@ out_free:
if (!ubi->volumes[i])
continue;
kfree(ubi->volumes[i]->eba_tbl);
+ ubi->volumes[i]->eba_tbl = NULL;
}
return err;
}
diff --git a/drivers/mtd/ubi/gluebi.c b/drivers/mtd/ubi/gluebi.c
index 49cd55ade9c..b5e478fa266 100644
--- a/drivers/mtd/ubi/gluebi.c
+++ b/drivers/mtd/ubi/gluebi.c
@@ -19,17 +19,71 @@
*/
/*
- * This file includes implementation of fake MTD devices for each UBI volume.
- * This sounds strange, but it is in fact quite useful to make MTD-oriented
- * software (including all the legacy software) to work on top of UBI.
+ * This is a small driver which implements fake MTD devices on top of UBI
+ * volumes. This sounds strange, but it is in fact quite useful to make
+ * MTD-oriented software (including all the legacy software) work on top of
+ * UBI.
*
* Gluebi emulates MTD devices of "MTD_UBIVOLUME" type. Their minimal I/O unit
- * size (mtd->writesize) is equivalent to the UBI minimal I/O unit. The
+ * size (@mtd->writesize) is equivalent to the UBI minimal I/O unit. The
* eraseblock size is equivalent to the logical eraseblock size of the volume.
*/
+#include <linux/err.h>
+#include <linux/list.h>
+#include <linux/sched.h>
#include <linux/math64.h>
-#include "ubi.h"
+#include <linux/module.h>
+#include <linux/mutex.h>
+#include <linux/mtd/ubi.h>
+#include <linux/mtd/mtd.h>
+#include "ubi-media.h"
+
+#define err_msg(fmt, ...) \
+ printk(KERN_DEBUG "gluebi (pid %d): %s: " fmt "\n", \
+ current->pid, __func__, ##__VA_ARGS__)
+
+/**
+ * struct gluebi_device - a gluebi device description data structure.
+ * @mtd: emulated MTD device description object
+ * @refcnt: gluebi device reference count
+ * @desc: UBI volume descriptor
+ * @ubi_num: UBI device number this gluebi device works on
+ * @vol_id: ID of UBI volume this gluebi device works on
+ * @list: link in a list of gluebi devices
+ */
+struct gluebi_device {
+ struct mtd_info mtd;
+ int refcnt;
+ struct ubi_volume_desc *desc;
+ int ubi_num;
+ int vol_id;
+ struct list_head list;
+};
+
+/* List of all gluebi devices */
+static LIST_HEAD(gluebi_devices);
+static DEFINE_MUTEX(devices_mutex);
+
+/**
+ * find_gluebi_nolock - find a gluebi device.
+ * @ubi_num: UBI device number
+ * @vol_id: volume ID
+ *
+ * This function searches for a gluebi device corresponding to UBI device
+ * @ubi_num and UBI volume @vol_id. Returns the gluebi device description
+ * object in case of success and %NULL in case of failure. The caller has to
+ * have the &devices_mutex locked.
+ */
+static struct gluebi_device *find_gluebi_nolock(int ubi_num, int vol_id)
+{
+ struct gluebi_device *gluebi;
+
+ list_for_each_entry(gluebi, &gluebi_devices, list)
+ if (gluebi->ubi_num == ubi_num && gluebi->vol_id == vol_id)
+ return gluebi;
+ return NULL;
+}
/**
* gluebi_get_device - get MTD device reference.
@@ -41,15 +95,18 @@
*/
static int gluebi_get_device(struct mtd_info *mtd)
{
- struct ubi_volume *vol;
+ struct gluebi_device *gluebi;
+ int ubi_mode = UBI_READONLY;
- vol = container_of(mtd, struct ubi_volume, gluebi_mtd);
+ if (!try_module_get(THIS_MODULE))
+ return -ENODEV;
- /*
- * We do not introduce locks for gluebi reference count because the
- * get_device()/put_device() calls are already serialized at MTD.
- */
- if (vol->gluebi_refcount > 0) {
+ if (mtd->flags & MTD_WRITEABLE)
+ ubi_mode = UBI_READWRITE;
+
+ gluebi = container_of(mtd, struct gluebi_device, mtd);
+ mutex_lock(&devices_mutex);
+ if (gluebi->refcnt > 0) {
/*
* The MTD device is already referenced and this is just one
* more reference. MTD allows many users to open the same
@@ -58,7 +115,8 @@ static int gluebi_get_device(struct mtd_info *mtd)
* open the UBI volume again - just increase the reference
* counter and return.
*/
- vol->gluebi_refcount += 1;
+ gluebi->refcnt += 1;
+ mutex_unlock(&devices_mutex);
return 0;
}
@@ -66,11 +124,15 @@ static int gluebi_get_device(struct mtd_info *mtd)
* This is the first reference to this UBI volume via the MTD device
* interface. Open the corresponding volume in read-write mode.
*/
- vol->gluebi_desc = ubi_open_volume(vol->ubi->ubi_num, vol->vol_id,
- UBI_READWRITE);
- if (IS_ERR(vol->gluebi_desc))
- return PTR_ERR(vol->gluebi_desc);
- vol->gluebi_refcount += 1;
+ gluebi->desc = ubi_open_volume(gluebi->ubi_num, gluebi->vol_id,
+ ubi_mode);
+ if (IS_ERR(gluebi->desc)) {
+ mutex_unlock(&devices_mutex);
+ module_put(THIS_MODULE);
+ return PTR_ERR(gluebi->desc);
+ }
+ gluebi->refcnt += 1;
+ mutex_unlock(&devices_mutex);
return 0;
}
@@ -83,13 +145,15 @@ static int gluebi_get_device(struct mtd_info *mtd)
*/
static void gluebi_put_device(struct mtd_info *mtd)
{
- struct ubi_volume *vol;
-
- vol = container_of(mtd, struct ubi_volume, gluebi_mtd);
- vol->gluebi_refcount -= 1;
- ubi_assert(vol->gluebi_refcount >= 0);
- if (vol->gluebi_refcount == 0)
- ubi_close_volume(vol->gluebi_desc);
+ struct gluebi_device *gluebi;
+
+ gluebi = container_of(mtd, struct gluebi_device, mtd);
+ mutex_lock(&devices_mutex);
+ gluebi->refcnt -= 1;
+ if (gluebi->refcnt == 0)
+ ubi_close_volume(gluebi->desc);
+ module_put(THIS_MODULE);
+ mutex_unlock(&devices_mutex);
}
/**
@@ -107,16 +171,12 @@ static int gluebi_read(struct mtd_info *mtd, loff_t from, size_t len,
size_t *retlen, unsigned char *buf)
{
int err = 0, lnum, offs, total_read;
- struct ubi_volume *vol;
- struct ubi_device *ubi;
-
- dbg_gen("read %zd bytes from offset %lld", len, from);
+ struct gluebi_device *gluebi;
if (len < 0 || from < 0 || from + len > mtd->size)
return -EINVAL;
- vol = container_of(mtd, struct ubi_volume, gluebi_mtd);
- ubi = vol->ubi;
+ gluebi = container_of(mtd, struct gluebi_device, mtd);
lnum = div_u64_rem(from, mtd->erasesize, &offs);
total_read = len;
@@ -126,7 +186,7 @@ static int gluebi_read(struct mtd_info *mtd, loff_t from, size_t len,
if (to_read > total_read)
to_read = total_read;
- err = ubi_eba_read_leb(ubi, vol, lnum, buf, offs, to_read, 0);
+ err = ubi_read(gluebi->desc, lnum, buf, offs, to_read);
if (err)
break;
@@ -152,21 +212,17 @@ static int gluebi_read(struct mtd_info *mtd, loff_t from, size_t len,
* case of failure.
*/
static int gluebi_write(struct mtd_info *mtd, loff_t to, size_t len,
- size_t *retlen, const u_char *buf)
+ size_t *retlen, const u_char *buf)
{
int err = 0, lnum, offs, total_written;
- struct ubi_volume *vol;
- struct ubi_device *ubi;
-
- dbg_gen("write %zd bytes to offset %lld", len, to);
+ struct gluebi_device *gluebi;
if (len < 0 || to < 0 || len + to > mtd->size)
return -EINVAL;
- vol = container_of(mtd, struct ubi_volume, gluebi_mtd);
- ubi = vol->ubi;
+ gluebi = container_of(mtd, struct gluebi_device, mtd);
- if (ubi->ro_mode)
+ if (!(mtd->flags & MTD_WRITEABLE))
return -EROFS;
lnum = div_u64_rem(to, mtd->erasesize, &offs);
@@ -181,8 +237,7 @@ static int gluebi_write(struct mtd_info *mtd, loff_t to, size_t len,
if (to_write > total_written)
to_write = total_written;
- err = ubi_eba_write_leb(ubi, vol, lnum, buf, offs, to_write,
- UBI_UNKNOWN);
+ err = ubi_write(gluebi->desc, lnum, buf, offs, to_write);
if (err)
break;
@@ -207,41 +262,36 @@ static int gluebi_write(struct mtd_info *mtd, loff_t to, size_t len,
static int gluebi_erase(struct mtd_info *mtd, struct erase_info *instr)
{
int err, i, lnum, count;
- struct ubi_volume *vol;
- struct ubi_device *ubi;
-
- dbg_gen("erase %llu bytes at offset %llu", (unsigned long long)instr->len,
- (unsigned long long)instr->addr);
+ struct gluebi_device *gluebi;
if (instr->addr < 0 || instr->addr > mtd->size - mtd->erasesize)
return -EINVAL;
-
if (instr->len < 0 || instr->addr + instr->len > mtd->size)
return -EINVAL;
-
if (mtd_mod_by_ws(instr->addr, mtd) || mtd_mod_by_ws(instr->len, mtd))
return -EINVAL;
lnum = mtd_div_by_eb(instr->addr, mtd);
count = mtd_div_by_eb(instr->len, mtd);
- vol = container_of(mtd, struct ubi_volume, gluebi_mtd);
- ubi = vol->ubi;
+ gluebi = container_of(mtd, struct gluebi_device, mtd);
- if (ubi->ro_mode)
+ if (!(mtd->flags & MTD_WRITEABLE))
return -EROFS;
- for (i = 0; i < count; i++) {
- err = ubi_eba_unmap_leb(ubi, vol, lnum + i);
+ for (i = 0; i < count - 1; i++) {
+ err = ubi_leb_unmap(gluebi->desc, lnum + i);
if (err)
goto out_err;
}
-
/*
* MTD erase operations are synchronous, so we have to make sure the
* physical eraseblock is wiped out.
+ *
+ * Thus, perform a leb_erase instead of a leb_unmap operation - leb_erase
+ * will wait for the end of the operation.
*/
- err = ubi_wl_flush(ubi);
+ err = ubi_leb_erase(gluebi->desc, lnum + i);
if (err)
goto out_err;
@@ -256,28 +306,39 @@ out_err:
}
/**
- * ubi_create_gluebi - initialize gluebi for an UBI volume.
- * @ubi: UBI device description object
- * @vol: volume description object
+ * gluebi_create - create a gluebi device for an UBI volume.
+ * @di: UBI device description object
+ * @vi: UBI volume description object
*
- * This function is called when an UBI volume is created in order to create
+ * This function is called when a new UBI volume is created in order to create
* corresponding fake MTD device. Returns zero in case of success and a
* negative error code in case of failure.
*/
-int ubi_create_gluebi(struct ubi_device *ubi, struct ubi_volume *vol)
+static int gluebi_create(struct ubi_device_info *di,
+ struct ubi_volume_info *vi)
{
- struct mtd_info *mtd = &vol->gluebi_mtd;
+ struct gluebi_device *gluebi, *g;
+ struct mtd_info *mtd;
- mtd->name = kmemdup(vol->name, vol->name_len + 1, GFP_KERNEL);
- if (!mtd->name)
+ gluebi = kzalloc(sizeof(struct gluebi_device), GFP_KERNEL);
+ if (!gluebi)
return -ENOMEM;
+ mtd = &gluebi->mtd;
+ mtd->name = kmemdup(vi->name, vi->name_len + 1, GFP_KERNEL);
+ if (!mtd->name) {
+ kfree(gluebi);
+ return -ENOMEM;
+ }
+
+ gluebi->vol_id = vi->vol_id;
+ gluebi->ubi_num = vi->ubi_num;
mtd->type = MTD_UBIVOLUME;
- if (!ubi->ro_mode)
+ if (!di->ro_mode)
mtd->flags = MTD_WRITEABLE;
- mtd->writesize = ubi->min_io_size;
mtd->owner = THIS_MODULE;
- mtd->erasesize = vol->usable_leb_size;
+ mtd->writesize = di->min_io_size;
+ mtd->erasesize = vi->usable_leb_size;
mtd->read = gluebi_read;
mtd->write = gluebi_write;
mtd->erase = gluebi_erase;
@@ -285,60 +346,196 @@ int ubi_create_gluebi(struct ubi_device *ubi, struct ubi_volume *vol)
mtd->put_device = gluebi_put_device;
/*
- * In case of dynamic volume, MTD device size is just volume size. In
+ * In case of a dynamic volume, MTD device size is just volume size. In
* case of a static volume the size is equivalent to the amount of data
* bytes.
*/
- if (vol->vol_type == UBI_DYNAMIC_VOLUME)
- mtd->size = (long long)vol->usable_leb_size * vol->reserved_pebs;
+ if (vi->vol_type == UBI_DYNAMIC_VOLUME)
+ mtd->size = (unsigned long long)vi->usable_leb_size * vi->size;
else
- mtd->size = vol->used_bytes;
+ mtd->size = vi->used_bytes;
+
+ /* Just a sanity check - make sure this gluebi device does not exist */
+ mutex_lock(&devices_mutex);
+ g = find_gluebi_nolock(vi->ubi_num, vi->vol_id);
+ if (g)
+ err_msg("gluebi MTD device %d from UBI device %d volume %d "
+ "already exists", g->mtd.index, vi->ubi_num,
+ vi->vol_id);
+ mutex_unlock(&devices_mutex);
if (add_mtd_device(mtd)) {
- ubi_err("cannot not add MTD device");
+ err_msg("cannot add MTD device");
kfree(mtd->name);
+ kfree(gluebi);
return -ENFILE;
}
- dbg_gen("added mtd%d (\"%s\"), size %llu, EB size %u",
- mtd->index, mtd->name, (unsigned long long)mtd->size, mtd->erasesize);
+ mutex_lock(&devices_mutex);
+ list_add_tail(&gluebi->list, &gluebi_devices);
+ mutex_unlock(&devices_mutex);
return 0;
}
/**
- * ubi_destroy_gluebi - close gluebi for an UBI volume.
- * @vol: volume description object
+ * gluebi_remove - remove a gluebi device.
+ * @vi: UBI volume description object
*
- * This function is called when an UBI volume is removed in order to remove
+ * This function is called when an UBI volume is removed and it removes
* corresponding fake MTD device. Returns zero in case of success and a
* negative error code in case of failure.
*/
-int ubi_destroy_gluebi(struct ubi_volume *vol)
+static int gluebi_remove(struct ubi_volume_info *vi)
{
- int err;
- struct mtd_info *mtd = &vol->gluebi_mtd;
+ int err = 0;
+ struct mtd_info *mtd;
+ struct gluebi_device *gluebi;
+
+ mutex_lock(&devices_mutex);
+ gluebi = find_gluebi_nolock(vi->ubi_num, vi->vol_id);
+ if (!gluebi) {
+ err_msg("got remove notification for unknown UBI device %d "
+ "volume %d", vi->ubi_num, vi->vol_id);
+ err = -ENOENT;
+ } else if (gluebi->refcnt)
+ err = -EBUSY;
+ else
+ list_del(&gluebi->list);
+ mutex_unlock(&devices_mutex);
+ if (err)
+ return err;
- dbg_gen("remove mtd%d", mtd->index);
+ mtd = &gluebi->mtd;
err = del_mtd_device(mtd);
- if (err)
+ if (err) {
+ err_msg("cannot remove fake MTD device %d, UBI device %d, "
+ "volume %d, error %d", mtd->index, gluebi->ubi_num,
+ gluebi->vol_id, err);
+ mutex_lock(&devices_mutex);
+ list_add_tail(&gluebi->list, &gluebi_devices);
+ mutex_unlock(&devices_mutex);
return err;
+ }
+
kfree(mtd->name);
+ kfree(gluebi);
return 0;
}
/**
- * ubi_gluebi_updated - UBI volume was updated notifier.
- * @vol: volume description object
+ * gluebi_updated - UBI volume was updated notifier.
+ * @vi: volume info structure
*
- * This function is called every time an UBI volume is updated. This function
- * does nothing if volume @vol is dynamic, and changes MTD device size if the
+ * This function is called every time an UBI volume is updated. It does nothing
+ * if the volume @vi is dynamic, and changes the MTD device size if the
* volume is static. This is needed because static volumes cannot be read past
- * data they contain.
+ * data they contain. This function returns zero in case of success and a
+ * negative error code in case of error.
+ */
+static int gluebi_updated(struct ubi_volume_info *vi)
+{
+ struct gluebi_device *gluebi;
+
+ mutex_lock(&devices_mutex);
+ gluebi = find_gluebi_nolock(vi->ubi_num, vi->vol_id);
+ if (!gluebi) {
+ mutex_unlock(&devices_mutex);
+ err_msg("got update notification for unknown UBI device %d "
+ "volume %d", vi->ubi_num, vi->vol_id);
+ return -ENOENT;
+ }
+
+ if (vi->vol_type == UBI_STATIC_VOLUME)
+ gluebi->mtd.size = vi->used_bytes;
+ mutex_unlock(&devices_mutex);
+ return 0;
+}
+
+/**
+ * gluebi_resized - UBI volume was re-sized notifier.
+ * @vi: volume info structure
+ *
+ * This function is called every time an UBI volume is re-sized. It changes the
+ * corresponding fake MTD device size. This function returns zero in case of
+ * success and a negative error code in case of error.
+ */
+static int gluebi_resized(struct ubi_volume_info *vi)
+{
+ struct gluebi_device *gluebi;
+
+ mutex_lock(&devices_mutex);
+ gluebi = find_gluebi_nolock(vi->ubi_num, vi->vol_id);
+ if (!gluebi) {
+ mutex_unlock(&devices_mutex);
+ err_msg("got resize notification for unknown UBI device %d "
+ "volume %d", vi->ubi_num, vi->vol_id);
+ return -ENOENT;
+ }
+ gluebi->mtd.size = vi->used_bytes;
+ mutex_unlock(&devices_mutex);
+ return 0;
+}
+
+/**
+ * gluebi_notify - UBI notification handler.
+ * @nb: registered notifier block
+ * @l: notification type
+ * @ptr: pointer to the &struct ubi_notification object
*/
-void ubi_gluebi_updated(struct ubi_volume *vol)
+static int gluebi_notify(struct notifier_block *nb, unsigned long l,
+ void *ns_ptr)
{
- struct mtd_info *mtd = &vol->gluebi_mtd;
+ struct ubi_notification *nt = ns_ptr;
+
+ switch (l) {
+ case UBI_VOLUME_ADDED:
+ gluebi_create(&nt->di, &nt->vi);
+ break;
+ case UBI_VOLUME_REMOVED:
+ gluebi_remove(&nt->vi);
+ break;
+ case UBI_VOLUME_RESIZED:
+ gluebi_resized(&nt->vi);
+ break;
+ case UBI_VOLUME_UPDATED:
+ gluebi_updated(&nt->vi);
+ break;
+ default:
+ break;
+ }
+ return NOTIFY_OK;
+}
- if (vol->vol_type == UBI_STATIC_VOLUME)
- mtd->size = vol->used_bytes;
+static struct notifier_block gluebi_notifier = {
+ .notifier_call = gluebi_notify,
+};
+
+static int __init ubi_gluebi_init(void)
+{
+ return ubi_register_volume_notifier(&gluebi_notifier, 0);
}
+
+static void __exit ubi_gluebi_exit(void)
+{
+ struct gluebi_device *gluebi, *g;
+
+ list_for_each_entry_safe(gluebi, g, &gluebi_devices, list) {
+ int err;
+ struct mtd_info *mtd = &gluebi->mtd;
+
+ err = del_mtd_device(mtd);
+ if (err)
+ err_msg("error %d while removing gluebi MTD device %d, "
+ "UBI device %d, volume %d - ignoring", err,
+ mtd->index, gluebi->ubi_num, gluebi->vol_id);
+ kfree(mtd->name);
+ kfree(gluebi);
+ }
+ ubi_unregister_volume_notifier(&gluebi_notifier);
+}
+
+module_init(ubi_gluebi_init);
+module_exit(ubi_gluebi_exit);
+MODULE_DESCRIPTION("MTD emulation layer over UBI volumes");
+MODULE_AUTHOR("Artem Bityutskiy, Joern Engel");
+MODULE_LICENSE("GPL");
diff --git a/drivers/mtd/ubi/io.c b/drivers/mtd/ubi/io.c
index fe81039f2a7..4cb69925d8d 100644
--- a/drivers/mtd/ubi/io.c
+++ b/drivers/mtd/ubi/io.c
@@ -98,15 +98,12 @@ static int paranoid_check_ec_hdr(const struct ubi_device *ubi, int pnum,
static int paranoid_check_peb_vid_hdr(const struct ubi_device *ubi, int pnum);
static int paranoid_check_vid_hdr(const struct ubi_device *ubi, int pnum,
const struct ubi_vid_hdr *vid_hdr);
-static int paranoid_check_all_ff(struct ubi_device *ubi, int pnum, int offset,
- int len);
#else
#define paranoid_check_not_bad(ubi, pnum) 0
#define paranoid_check_peb_ec_hdr(ubi, pnum) 0
#define paranoid_check_ec_hdr(ubi, pnum, ec_hdr) 0
#define paranoid_check_peb_vid_hdr(ubi, pnum) 0
#define paranoid_check_vid_hdr(ubi, pnum, vid_hdr) 0
-#define paranoid_check_all_ff(ubi, pnum, offset, len) 0
#endif
/**
@@ -242,7 +239,7 @@ int ubi_io_write(struct ubi_device *ubi, const void *buf, int pnum, int offset,
return err > 0 ? -EINVAL : err;
/* The area we are writing to has to contain all 0xFF bytes */
- err = paranoid_check_all_ff(ubi, pnum, offset, len);
+ err = ubi_dbg_check_all_ff(ubi, pnum, offset, len);
if (err)
return err > 0 ? -EINVAL : err;
@@ -269,8 +266,8 @@ int ubi_io_write(struct ubi_device *ubi, const void *buf, int pnum, int offset,
addr = (loff_t)pnum * ubi->peb_size + offset;
err = ubi->mtd->write(ubi->mtd, addr, len, &written, buf);
if (err) {
- ubi_err("error %d while writing %d bytes to PEB %d:%d, written"
- " %zd bytes", err, len, pnum, offset, written);
+ ubi_err("error %d while writing %d bytes to PEB %d:%d, written "
+ "%zd bytes", err, len, pnum, offset, written);
ubi_dbg_dump_stack();
} else
ubi_assert(written == len);
@@ -348,7 +345,7 @@ retry:
return -EIO;
}
- err = paranoid_check_all_ff(ubi, pnum, 0, ubi->peb_size);
+ err = ubi_dbg_check_all_ff(ubi, pnum, 0, ubi->peb_size);
if (err)
return err > 0 ? -EINVAL : err;
@@ -457,6 +454,54 @@ out:
}
/**
+ * nor_erase_prepare - prepare a NOR flash PEB for erasure.
+ * @ubi: UBI device description object
+ * @pnum: physical eraseblock number to prepare
+ *
+ * NOR flashes, or at least some of them, have a peculiar embedded PEB erasure
+ * algorithm: the PEB is first filled with zeroes, then it is erased. And
+ * filling with zeroes starts from the end of the PEB. This was observed with
+ * Spansion S29GL512N NOR flash.
+ *
+ * This means that in case of a power cut we may end up with intact data at the
+ * beginning of the PEB, and all zeroes at the end of the PEB. In other words, the
+ * EC and VID headers are OK, but a large chunk of data at the end of PEB is
+ * zeroed. This makes UBI mistakenly treat this PEB as used and associate it
+ * with an LEB, which leads to subsequent failures (e.g., UBIFS fails).
+ *
+ * This function is called before erasing NOR PEBs and it zeroes out EC and VID
+ * magic numbers in order to invalidate them and prevent the failures. Returns
+ * zero in case of success and a negative error code in case of failure.
+ */
+static int nor_erase_prepare(struct ubi_device *ubi, int pnum)
+{
+ int err;
+ size_t written;
+ loff_t addr;
+ uint32_t data = 0;
+
+ addr = (loff_t)pnum * ubi->peb_size;
+ err = ubi->mtd->write(ubi->mtd, addr, 4, &written, (void *)&data);
+ if (err) {
+ ubi_err("error %d while writing 4 bytes to PEB %d:%d, written "
+ "%zd bytes", err, pnum, 0, written);
+ ubi_dbg_dump_stack();
+ return err;
+ }
+
+ addr += ubi->vid_hdr_aloffset;
+ err = ubi->mtd->write(ubi->mtd, addr, 4, &written, (void *)&data);
+ if (err) {
+ ubi_err("error %d while writing 4 bytes to PEB %d:%d, written "
+ "%zd bytes", err, pnum, ubi->vid_hdr_aloffset, written);
+ ubi_dbg_dump_stack();
+ return err;
+ }
+
+ return 0;
+}
+
+/**
* ubi_io_sync_erase - synchronously erase a physical eraseblock.
* @ubi: UBI device description object
* @pnum: physical eraseblock number to erase
@@ -487,6 +532,12 @@ int ubi_io_sync_erase(struct ubi_device *ubi, int pnum, int torture)
return -EROFS;
}
+ if (ubi->nor_flash) {
+ err = nor_erase_prepare(ubi, pnum);
+ if (err)
+ return err;
+ }
+
if (torture) {
ret = torture_peb(ubi, pnum);
if (ret < 0)
@@ -670,16 +721,6 @@ int ubi_io_read_ec_hdr(struct ubi_device *ubi, int pnum,
if (read_err != -EBADMSG &&
check_pattern(ec_hdr, 0xFF, UBI_EC_HDR_SIZE)) {
/* The physical eraseblock is supposedly empty */
-
- /*
- * The below is just a paranoid check, it has to be
- * compiled out if paranoid checks are disabled.
- */
- err = paranoid_check_all_ff(ubi, pnum, 0,
- ubi->peb_size);
- if (err)
- return err > 0 ? UBI_IO_BAD_EC_HDR : err;
-
if (verbose)
ubi_warn("no EC header found at PEB %d, "
"only 0xFF bytes", pnum);
@@ -755,6 +796,7 @@ int ubi_io_write_ec_hdr(struct ubi_device *ubi, int pnum,
ec_hdr->version = UBI_VERSION;
ec_hdr->vid_hdr_offset = cpu_to_be32(ubi->vid_hdr_offset);
ec_hdr->data_offset = cpu_to_be32(ubi->leb_start);
+ ec_hdr->image_seq = cpu_to_be32(ubi->image_seq);
crc = crc32(UBI_CRC32_INIT, ec_hdr, UBI_EC_HDR_SIZE_CRC);
ec_hdr->hdr_crc = cpu_to_be32(crc);
@@ -902,7 +944,7 @@ bad:
* o %UBI_IO_BITFLIPS if the CRC is correct, but bit-flips were detected
* and corrected by the flash driver; this is harmless but may indicate that
* this eraseblock may become bad soon;
- * o %UBI_IO_BAD_VID_HRD if the volume identifier header is corrupted (a CRC
+ * o %UBI_IO_BAD_VID_HDR if the volume identifier header is corrupted (a CRC
* error detected);
* o %UBI_IO_PEB_FREE if the physical eraseblock is free (i.e., there is no VID
* header there);
@@ -950,16 +992,6 @@ int ubi_io_read_vid_hdr(struct ubi_device *ubi, int pnum,
if (read_err != -EBADMSG &&
check_pattern(vid_hdr, 0xFF, UBI_VID_HDR_SIZE)) {
/* The physical eraseblock is supposedly free */
-
- /*
- * The below is just a paranoid check, it has to be
- * compiled out if paranoid checks are disabled.
- */
- err = paranoid_check_all_ff(ubi, pnum, ubi->leb_start,
- ubi->leb_size);
- if (err)
- return err > 0 ? UBI_IO_BAD_VID_HDR : err;
-
if (verbose)
ubi_warn("no VID header found at PEB %d, "
"only 0xFF bytes", pnum);
@@ -1233,7 +1265,7 @@ exit:
}
/**
- * paranoid_check_all_ff - check that a region of flash is empty.
+ * ubi_dbg_check_all_ff - check that a region of flash is empty.
* @ubi: UBI device description object
* @pnum: the physical eraseblock number to check
* @offset: the starting offset within the physical eraseblock to check
@@ -1243,8 +1275,7 @@ exit:
* @offset of the physical eraseblock @pnum, %1 if not, and a negative error
* code if an error occurred.
*/
-static int paranoid_check_all_ff(struct ubi_device *ubi, int pnum, int offset,
- int len)
+int ubi_dbg_check_all_ff(struct ubi_device *ubi, int pnum, int offset, int len)
{
size_t read;
int err;
diff --git a/drivers/mtd/ubi/kapi.c b/drivers/mtd/ubi/kapi.c
index 4abbe573fa4..88a72e9c8be 100644
--- a/drivers/mtd/ubi/kapi.c
+++ b/drivers/mtd/ubi/kapi.c
@@ -26,6 +26,24 @@
#include "ubi.h"
/**
+ * ubi_do_get_device_info - get information about UBI device.
+ * @ubi: UBI device description object
+ * @di: the information is stored here
+ *
+ * This function is the same as 'ubi_get_device_info()', but it assumes the UBI
+ * device is locked and cannot disappear.
+ */
+void ubi_do_get_device_info(struct ubi_device *ubi, struct ubi_device_info *di)
+{
+ di->ubi_num = ubi->ubi_num;
+ di->leb_size = ubi->leb_size;
+ di->min_io_size = ubi->min_io_size;
+ di->ro_mode = ubi->ro_mode;
+ di->cdev = ubi->cdev.dev;
+}
+EXPORT_SYMBOL_GPL(ubi_do_get_device_info);
+
+/**
* ubi_get_device_info - get information about UBI device.
* @ubi_num: UBI device number
* @di: the information is stored here
@@ -39,33 +57,24 @@ int ubi_get_device_info(int ubi_num, struct ubi_device_info *di)
if (ubi_num < 0 || ubi_num >= UBI_MAX_DEVICES)
return -EINVAL;
-
ubi = ubi_get_device(ubi_num);
if (!ubi)
return -ENODEV;
-
- di->ubi_num = ubi->ubi_num;
- di->leb_size = ubi->leb_size;
- di->min_io_size = ubi->min_io_size;
- di->ro_mode = ubi->ro_mode;
- di->cdev = ubi->cdev.dev;
-
+ ubi_do_get_device_info(ubi, di);
ubi_put_device(ubi);
return 0;
}
EXPORT_SYMBOL_GPL(ubi_get_device_info);
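As a quick illustration of the querying side of this API, a client module might use ubi_get_device_info() roughly as follows (a minimal sketch; the device number 0 and all names are illustrative assumptions):

#include <linux/module.h>
#include <linux/mtd/ubi.h>

/* Minimal sketch: print a few ubi0 parameters at load time. */
static int __init ubi0_info_init(void)
{
    struct ubi_device_info di;
    int err = ubi_get_device_info(0, &di); /* assumed: ubi0 is attached */

    if (err)
        return err;

    pr_info("ubi%d: LEB size %d, min. I/O unit %d, %s\n",
            di.ubi_num, di.leb_size, di.min_io_size,
            di.ro_mode ? "read-only" : "read-write");
    return 0;
}

static void __exit ubi0_info_exit(void)
{
}

module_init(ubi0_info_init);
module_exit(ubi0_info_exit);
MODULE_LICENSE("GPL");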
/**
- * ubi_get_volume_info - get information about UBI volume.
- * @desc: volume descriptor
+ * ubi_do_get_volume_info - get information about UBI volume.
+ * @ubi: UBI device description object
+ * @vol: volume description object
* @vi: the information is stored here
*/
-void ubi_get_volume_info(struct ubi_volume_desc *desc,
- struct ubi_volume_info *vi)
+void ubi_do_get_volume_info(struct ubi_device *ubi, struct ubi_volume *vol,
+ struct ubi_volume_info *vi)
{
- const struct ubi_volume *vol = desc->vol;
- const struct ubi_device *ubi = vol->ubi;
-
vi->vol_id = vol->vol_id;
vi->ubi_num = ubi->ubi_num;
vi->size = vol->reserved_pebs;
@@ -79,6 +88,17 @@ void ubi_get_volume_info(struct ubi_volume_desc *desc,
vi->name = vol->name;
vi->cdev = vol->cdev.dev;
}
+
+/**
+ * ubi_get_volume_info - get information about UBI volume.
+ * @desc: volume descriptor
+ * @vi: the information is stored here
+ */
+void ubi_get_volume_info(struct ubi_volume_desc *desc,
+ struct ubi_volume_info *vi)
+{
+ ubi_do_get_volume_info(desc->vol->ubi, desc->vol, vi);
+}
EXPORT_SYMBOL_GPL(ubi_get_volume_info);
/**
@@ -106,7 +126,7 @@ struct ubi_volume_desc *ubi_open_volume(int ubi_num, int vol_id, int mode)
struct ubi_device *ubi;
struct ubi_volume *vol;
- dbg_gen("open device %d volume %d, mode %d", ubi_num, vol_id, mode);
+ dbg_gen("open device %d, volume %d, mode %d", ubi_num, vol_id, mode);
if (ubi_num < 0 || ubi_num >= UBI_MAX_DEVICES)
return ERR_PTR(-EINVAL);
@@ -196,6 +216,8 @@ out_free:
kfree(desc);
out_put_ubi:
ubi_put_device(ubi);
+ dbg_err("cannot open device %d, volume %d, error %d",
+ ubi_num, vol_id, err);
return ERR_PTR(err);
}
EXPORT_SYMBOL_GPL(ubi_open_volume);
@@ -215,7 +237,7 @@ struct ubi_volume_desc *ubi_open_volume_nm(int ubi_num, const char *name,
struct ubi_device *ubi;
struct ubi_volume_desc *ret;
- dbg_gen("open volume %s, mode %d", name, mode);
+ dbg_gen("open device %d, volume %s, mode %d", ubi_num, name, mode);
if (!name)
return ERR_PTR(-EINVAL);
@@ -266,7 +288,8 @@ void ubi_close_volume(struct ubi_volume_desc *desc)
struct ubi_volume *vol = desc->vol;
struct ubi_device *ubi = vol->ubi;
- dbg_gen("close volume %d, mode %d", vol->vol_id, desc->mode);
+ dbg_gen("close device %d, volume %d, mode %d",
+ ubi->ubi_num, vol->vol_id, desc->mode);
spin_lock(&ubi->volumes_lock);
switch (desc->mode) {
@@ -558,7 +581,7 @@ int ubi_leb_unmap(struct ubi_volume_desc *desc, int lnum)
EXPORT_SYMBOL_GPL(ubi_leb_unmap);
/**
- * ubi_leb_map - map logical erasblock to a physical eraseblock.
+ * ubi_leb_map - map logical eraseblock to a physical eraseblock.
* @desc: volume descriptor
* @lnum: logical eraseblock number
* @dtype: expected data type
@@ -656,3 +679,59 @@ int ubi_sync(int ubi_num)
return 0;
}
EXPORT_SYMBOL_GPL(ubi_sync);
+
+BLOCKING_NOTIFIER_HEAD(ubi_notifiers);
+
+/**
+ * ubi_register_volume_notifier - register a volume notifier.
+ * @nb: the notifier description object
+ * @ignore_existing: if non-zero, do not send "added" notification for all
+ * already existing volumes
+ *
+ * This function registers a volume notifier, which means that
+ * 'nb->notifier_call()' will be invoked when an UBI volume is created,
+ * removed, re-sized, re-named, or updated. The first argument of the function
+ * is the notification type. The second argument is a pointer to a
+ * &struct ubi_notification object which describes the notification event.
+ * Using UBI API from the volume notifier is prohibited.
+ *
+ * This function returns zero in case of success and a negative error code
+ * in case of failure.
+ */
+int ubi_register_volume_notifier(struct notifier_block *nb,
+ int ignore_existing)
+{
+ int err;
+
+ err = blocking_notifier_chain_register(&ubi_notifiers, nb);
+ if (err != 0)
+ return err;
+ if (ignore_existing)
+ return 0;
+
+ /*
+ * We are going to walk all UBI devices and all volumes, and
+ * notify the user about existing volumes by the %UBI_VOLUME_ADDED
+ * event. We have to lock the @ubi_devices_mutex to make sure UBI
+ * devices do not disappear.
+ */
+ mutex_lock(&ubi_devices_mutex);
+ ubi_enumerate_volumes(nb);
+ mutex_unlock(&ubi_devices_mutex);
+
+ return err;
+}
+EXPORT_SYMBOL_GPL(ubi_register_volume_notifier);
+
+/**
+ * ubi_unregister_volume_notifier - unregister the volume notifier.
+ * @nb: the notifier description object
+ *
+ * This function unregisters the volume notifier @nb and returns zero in case of
+ * success and a negative error code in case of failure.
+ */
+int ubi_unregister_volume_notifier(struct notifier_block *nb)
+{
+ return blocking_notifier_chain_unregister(&ubi_notifiers, nb);
+}
+EXPORT_SYMBOL_GPL(ubi_unregister_volume_notifier);
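A minimal sketch of how a client driver might consume this notifier API follows. The callback and module names are invented, and the assumption that the payload is a struct ubi_notification carrying a ubi_device_info ('di') and a ubi_volume_info ('vi') comes from the companion header change, which is not shown in this hunk.

/* Hypothetical client of the new volume-notifier API (sketch only). */
#include <linux/module.h>
#include <linux/notifier.h>
#include <linux/mtd/ubi.h>

static int example_vol_notify(struct notifier_block *nb, unsigned long ntype,
			      void *ns_ptr)
{
	/* Assumption: the payload is a struct ubi_notification with
	 * 'di' (device info) and 'vi' (volume info) members. */
	struct ubi_notification *nt = ns_ptr;

	switch (ntype) {
	case UBI_VOLUME_ADDED:
		pr_info("volume %d appeared on UBI device %d\n",
			nt->vi.vol_id, nt->di.ubi_num);
		break;
	case UBI_VOLUME_REMOVED:
		pr_info("volume %d removed from UBI device %d\n",
			nt->vi.vol_id, nt->di.ubi_num);
		break;
	default:
		break;
	}
	return NOTIFY_OK;
}

static struct notifier_block example_nb = {
	.notifier_call = example_vol_notify,
};

static int __init example_init(void)
{
	/* Pass 0 so UBI replays UBI_VOLUME_ADDED for already existing volumes */
	return ubi_register_volume_notifier(&example_nb, 0);
}

static void __exit example_exit(void)
{
	ubi_unregister_volume_notifier(&example_nb);
}

module_init(example_init);
module_exit(example_exit);
MODULE_LICENSE("GPL");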
diff --git a/drivers/mtd/ubi/scan.c b/drivers/mtd/ubi/scan.c
index c3d653ba5ca..b847745394b 100644
--- a/drivers/mtd/ubi/scan.c
+++ b/drivers/mtd/ubi/scan.c
@@ -757,6 +757,8 @@ static int process_eb(struct ubi_device *ubi, struct ubi_scan_info *si,
si->is_empty = 0;
if (!ec_corr) {
+ int image_seq;
+
/* Make sure UBI version is OK */
if (ech->version != UBI_VERSION) {
ubi_err("this UBI version is %d, image version is %d",
@@ -778,6 +780,29 @@ static int process_eb(struct ubi_device *ubi, struct ubi_scan_info *si,
ubi_dbg_dump_ec_hdr(ech);
return -EINVAL;
}
+
+ /*
+ * Make sure that all PEBs have the same image sequence number.
+ * This allows us to detect situations when users flash UBI
+ * images incorrectly, so that the flash has the new UBI image
+ * and leftovers from the old one. This feature was added
+ * relatively recently, and the sequence number was always
+ * zero, because old UBI implementations always set it to zero.
+ * For this reason, we do not panic if some PEBs have a zero
+ * sequence number while other PEBs have a non-zero sequence
+ * number.
+ */
+ image_seq = be32_to_cpu(ech->image_seq);
+ if (!si->image_seq_set) {
+ ubi->image_seq = image_seq;
+ si->image_seq_set = 1;
+ } else if (ubi->image_seq && ubi->image_seq != image_seq) {
+ ubi_err("bad image sequence number %d in PEB %d, "
+ "expected %d", image_seq, pnum, ubi->image_seq);
+ ubi_dbg_dump_ec_hdr(ech);
+ return -EINVAL;
+ }
+
}
/* OK, we've done with the EC header, let's look at the VID header */
diff --git a/drivers/mtd/ubi/scan.h b/drivers/mtd/ubi/scan.h
index 61df208e2f2..1017cf12def 100644
--- a/drivers/mtd/ubi/scan.h
+++ b/drivers/mtd/ubi/scan.h
@@ -102,6 +102,7 @@ struct ubi_scan_volume {
* @mean_ec: mean erase counter value
* @ec_sum: a temporary variable used when calculating @mean_ec
* @ec_count: a temporary variable used when calculating @mean_ec
+ * @image_seq_set: indicates @ubi->image_seq is known
*
* This data structure contains the result of scanning and may be used by other
* UBI sub-systems to build final UBI data structures, further error-recovery
@@ -124,6 +125,7 @@ struct ubi_scan_info {
int mean_ec;
uint64_t ec_sum;
int ec_count;
+ int image_seq_set;
};
struct ubi_device;
diff --git a/drivers/mtd/ubi/ubi-media.h b/drivers/mtd/ubi/ubi-media.h
index 8419fdccc79..503ea9b2730 100644
--- a/drivers/mtd/ubi/ubi-media.h
+++ b/drivers/mtd/ubi/ubi-media.h
@@ -129,6 +129,7 @@ enum {
* @ec: the erase counter
* @vid_hdr_offset: where the VID header starts
* @data_offset: where the user data start
+ * @image_seq: image sequence number
* @padding2: reserved for future, zeroes
* @hdr_crc: erase counter header CRC checksum
*
@@ -144,6 +145,14 @@ enum {
* volume identifier header and user data, relative to the beginning of the
* physical eraseblock. These values have to be the same for all physical
* eraseblocks.
+ *
+ * The @image_seq field is used to validate a UBI image that has been prepared
+ * for a UBI device. The @image_seq value can be any value, but it must be the
+ * same on all eraseblocks. UBI will ensure that all new erase counter headers
+ * also contain this value, and will check the value when scanning at start-up.
+ * One way to make use of @image_seq is to increase its value by one every time
+ * an image is flashed over an existing image; if the flashing does not
+ * complete, UBI will then detect the mismatch when scanning.
*/
struct ubi_ec_hdr {
__be32 magic;
@@ -152,7 +161,8 @@ struct ubi_ec_hdr {
__be64 ec; /* Warning: the current limit is 31-bit anyway! */
__be32 vid_hdr_offset;
__be32 data_offset;
- __u8 padding2[36];
+ __be32 image_seq;
+ __u8 padding2[32];
__be32 hdr_crc;
} __attribute__ ((packed));
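The I/O-path change that actually writes this field is outside this excerpt. As a rough sketch, stamping the device-wide sequence number into a freshly built EC header could look like the snippet below; the helper name and its placement are assumptions, only the @image_seq field and the @ubi->image_seq counter come from this patch, and the constants are taken from the UBI driver's internal headers (ubi.h / ubi-media.h).

/* Sketch: fill an erase-counter header, including the new image_seq
 * field, before it is written out. */
static void example_fill_ec_hdr(struct ubi_device *ubi,
				struct ubi_ec_hdr *ec_hdr, long long ec)
{
	ec_hdr->magic = cpu_to_be32(UBI_EC_HDR_MAGIC);
	ec_hdr->version = UBI_VERSION;
	ec_hdr->ec = cpu_to_be64(ec);
	ec_hdr->vid_hdr_offset = cpu_to_be32(ubi->vid_hdr_offset);
	ec_hdr->data_offset = cpu_to_be32(ubi->leb_start);
	/* Every new EC header carries the device-wide sequence number */
	ec_hdr->image_seq = cpu_to_be32(ubi->image_seq);
	ec_hdr->hdr_crc = cpu_to_be32(crc32(UBI_CRC32_INIT, ec_hdr,
					    UBI_EC_HDR_SIZE_CRC));
}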
diff --git a/drivers/mtd/ubi/ubi.h b/drivers/mtd/ubi/ubi.h
index c055511bb1b..6a5fe963378 100644
--- a/drivers/mtd/ubi/ubi.h
+++ b/drivers/mtd/ubi/ubi.h
@@ -36,6 +36,7 @@
#include <linux/device.h>
#include <linux/string.h>
#include <linux/vmalloc.h>
+#include <linux/notifier.h>
#include <linux/mtd/mtd.h>
#include <linux/mtd/ubi.h>
@@ -100,6 +101,28 @@ enum {
UBI_IO_BITFLIPS
};
+/*
+ * Return codes of the 'ubi_eba_copy_leb()' function.
+ *
+ * MOVE_CANCEL_RACE: canceled because the volume is being deleted, the source
+ * PEB was put meanwhile, or there is I/O on the source PEB
+ * MOVE_SOURCE_RD_ERR: canceled because there was a read error from the source
+ * PEB
+ * MOVE_TARGET_RD_ERR: canceled because there was a read error from the target
+ * PEB
+ * MOVE_TARGET_WR_ERR: canceled because there was a write error to the target
+ * PEB
+ * MOVE_CANCEL_BITFLIPS: canceled because a bit-flip was detected in the
+ * target PEB
+ */
+enum {
+ MOVE_CANCEL_RACE = 1,
+ MOVE_SOURCE_RD_ERR,
+ MOVE_TARGET_RD_ERR,
+ MOVE_TARGET_WR_ERR,
+ MOVE_CANCEL_BITFLIPS,
+};
+
/**
* struct ubi_wl_entry - wear-leveling entry.
* @u.rb: link in the corresponding (free/used) RB-tree
@@ -208,10 +231,6 @@ struct ubi_volume_desc;
* @changing_leb: %1 if the atomic LEB change ioctl command is in progress
* @direct_writes: %1 if direct writes are enabled for this volume
*
- * @gluebi_desc: gluebi UBI volume descriptor
- * @gluebi_refcount: reference count of the gluebi MTD device
- * @gluebi_mtd: MTD device description object of the gluebi MTD device
- *
* The @corrupted field indicates that the volume's contents is corrupted.
* Since UBI protects only static volumes, this field is not relevant to
* dynamic volumes - it is user's responsibility to assure their data
@@ -255,17 +274,6 @@ struct ubi_volume {
unsigned int updating:1;
unsigned int changing_leb:1;
unsigned int direct_writes:1;
-
-#ifdef CONFIG_MTD_UBI_GLUEBI
- /*
- * Gluebi-related stuff may be compiled out.
- * Note: this should not be built into UBI but should be a separate
- * ubimtd driver which works on top of UBI and emulates MTD devices.
- */
- struct ubi_volume_desc *gluebi_desc;
- int gluebi_refcount;
- struct mtd_info gluebi_mtd;
-#endif
};
/**
@@ -293,6 +301,7 @@ struct ubi_wl_entry;
* @vol->readers, @vol->writers, @vol->exclusive,
* @vol->ref_count, @vol->mapping and @vol->eba_tbl.
* @ref_count: count of references on the UBI device
+ * @image_seq: image sequence number recorded on EC headers
*
* @rsvd_pebs: count of reserved physical eraseblocks
* @avail_pebs: count of available physical eraseblocks
@@ -305,9 +314,9 @@ struct ubi_wl_entry;
* @vtbl_slots: how many slots are available in the volume table
* @vtbl_size: size of the volume table in bytes
* @vtbl: in-RAM volume table copy
- * @volumes_mutex: protects on-flash volume table and serializes volume
- * changes, like creation, deletion, update, re-size,
- * re-name and set property
+ * @device_mutex: protects on-flash volume table and serializes volume
+ * creation, deletion, update, re-size, re-name and set
+ * property
*
* @max_ec: current highest erase counter value
* @mean_ec: current mean erase counter value
@@ -318,14 +327,15 @@ struct ubi_wl_entry;
* @alc_mutex: serializes "atomic LEB change" operations
*
* @used: RB-tree of used physical eraseblocks
+ * @erroneous: RB-tree of erroneous used physical eraseblocks
* @free: RB-tree of free physical eraseblocks
* @scrub: RB-tree of physical eraseblocks which need scrubbing
* @pq: protection queue (contain physical eraseblocks which are temporarily
* protected from the wear-leveling worker)
* @pq_head: protection queue head
* @wl_lock: protects the @used, @free, @pq, @pq_head, @lookuptbl, @move_from,
- * @move_to, @move_to_put @erase_pending, @wl_scheduled and @works
- * fields
+ * @move_to, @move_to_put, @erase_pending, @wl_scheduled, @works,
+ * @erroneous, and @erroneous_peb_count fields
* @move_mutex: serializes eraseblock moves
* @work_sem: synchronizes the WL worker with use tasks
* @wl_scheduled: non-zero if the wear-leveling was scheduled
@@ -339,12 +349,15 @@ struct ubi_wl_entry;
* @bgt_thread: background thread description object
* @thread_enabled: if the background thread is enabled
* @bgt_name: background thread name
+ * @reboot_notifier: notifier to terminate background thread before rebooting
*
* @flash_size: underlying MTD device size (in bytes)
* @peb_count: count of physical eraseblocks on the MTD device
* @peb_size: physical eraseblock size
* @bad_peb_count: count of bad physical eraseblocks
* @good_peb_count: count of good physical eraseblocks
+ * @erroneous_peb_count: count of erroneous physical eraseblocks in @erroneous
+ * @max_erroneous: maximum allowed number of erroneous physical eraseblocks
* @min_io_size: minimal input/output unit size of the underlying MTD device
* @hdrs_min_io_size: minimal I/O unit size used for VID and EC headers
* @ro_mode: if the UBI device is in read-only mode
@@ -360,13 +373,13 @@ struct ubi_wl_entry;
* @vid_hdr_shift: contains @vid_hdr_offset - @vid_hdr_aloffset
* @bad_allowed: whether the MTD device admits of bad physical eraseblocks or
* not
+ * @nor_flash: non-zero if working on top of NOR flash
* @mtd: MTD device descriptor
*
* @peb_buf1: a buffer of PEB size used for different purposes
* @peb_buf2: another buffer of PEB size used for different purposes
* @buf_mutex: protects @peb_buf1 and @peb_buf2
* @ckvol_mutex: serializes static volume checking when opening
- * @mult_mutex: serializes operations on multiple volumes, like re-naming
* @dbg_peb_buf: buffer of PEB size used for debugging
* @dbg_buf_mutex: protects @dbg_peb_buf
*/
@@ -379,6 +392,7 @@ struct ubi_device {
struct ubi_volume *volumes[UBI_MAX_VOLUMES+UBI_INT_VOL_COUNT];
spinlock_t volumes_lock;
int ref_count;
+ int image_seq;
int rsvd_pebs;
int avail_pebs;
@@ -389,7 +403,7 @@ struct ubi_device {
int vtbl_slots;
int vtbl_size;
struct ubi_vtbl_record *vtbl;
- struct mutex volumes_mutex;
+ struct mutex device_mutex;
int max_ec;
/* Note, mean_ec is not updated run-time - should be fixed */
@@ -403,6 +417,7 @@ struct ubi_device {
/* Wear-leveling sub-system's stuff */
struct rb_root used;
+ struct rb_root erroneous;
struct rb_root free;
struct rb_root scrub;
struct list_head pq[UBI_PROT_QUEUE_LEN];
@@ -420,6 +435,7 @@ struct ubi_device {
struct task_struct *bgt_thread;
int thread_enabled;
char bgt_name[sizeof(UBI_BGT_NAME_PATTERN)+2];
+ struct notifier_block reboot_notifier;
/* I/O sub-system's stuff */
long long flash_size;
@@ -427,6 +443,8 @@ struct ubi_device {
int peb_size;
int bad_peb_count;
int good_peb_count;
+ int erroneous_peb_count;
+ int max_erroneous;
int min_io_size;
int hdrs_min_io_size;
int ro_mode;
@@ -437,15 +455,15 @@ struct ubi_device {
int vid_hdr_offset;
int vid_hdr_aloffset;
int vid_hdr_shift;
- int bad_allowed;
+ unsigned int bad_allowed:1;
+ unsigned int nor_flash:1;
struct mtd_info *mtd;
void *peb_buf1;
void *peb_buf2;
struct mutex buf_mutex;
struct mutex ckvol_mutex;
- struct mutex mult_mutex;
-#ifdef CONFIG_MTD_UBI_DEBUG
+#ifdef CONFIG_MTD_UBI_DEBUG_PARANOID
void *dbg_peb_buf;
struct mutex dbg_buf_mutex;
#endif
@@ -457,6 +475,7 @@ extern const struct file_operations ubi_cdev_operations;
extern const struct file_operations ubi_vol_cdev_operations;
extern struct class *ubi_class;
extern struct mutex ubi_devices_mutex;
+extern struct blocking_notifier_head ubi_notifiers;
/* vtbl.c */
int ubi_change_vtbl_record(struct ubi_device *ubi, int idx,
@@ -489,17 +508,6 @@ int ubi_calc_data_len(const struct ubi_device *ubi, const void *buf,
int ubi_check_volume(struct ubi_device *ubi, int vol_id);
void ubi_calculate_reserved(struct ubi_device *ubi);
-/* gluebi.c */
-#ifdef CONFIG_MTD_UBI_GLUEBI
-int ubi_create_gluebi(struct ubi_device *ubi, struct ubi_volume *vol);
-int ubi_destroy_gluebi(struct ubi_volume *vol);
-void ubi_gluebi_updated(struct ubi_volume *vol);
-#else
-#define ubi_create_gluebi(ubi, vol) 0
-#define ubi_destroy_gluebi(vol) 0
-#define ubi_gluebi_updated(vol)
-#endif
-
/* eba.c */
int ubi_eba_unmap_leb(struct ubi_device *ubi, struct ubi_volume *vol,
int lnum);
@@ -549,6 +557,16 @@ struct ubi_device *ubi_get_device(int ubi_num);
void ubi_put_device(struct ubi_device *ubi);
struct ubi_device *ubi_get_by_major(int major);
int ubi_major2num(int major);
+int ubi_volume_notify(struct ubi_device *ubi, struct ubi_volume *vol,
+ int ntype);
+int ubi_notify_all(struct ubi_device *ubi, int ntype,
+ struct notifier_block *nb);
+int ubi_enumerate_volumes(struct notifier_block *nb);
+
+/* kapi.c */
+void ubi_do_get_device_info(struct ubi_device *ubi, struct ubi_device_info *di);
+void ubi_do_get_volume_info(struct ubi_device *ubi, struct ubi_volume *vol,
+ struct ubi_volume_info *vi);
/*
* ubi_rb_for_each_entry - walk an RB-tree.
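The build.c counterpart that fires these notifications is not part of this excerpt. A plausible sketch of how the prototypes declared above fit together is shown here, assuming struct ubi_notification bundles the device and volume info in 'di' and 'vi' members (not visible in this hunk).

/* Sketch of a ubi_volume_notify()-style helper built on the declarations
 * above; the real implementation lives in build.c, outside this diff. */
int ubi_volume_notify(struct ubi_device *ubi, struct ubi_volume *vol,
		      int ntype)
{
	struct ubi_notification nt;

	ubi_do_get_device_info(ubi, &nt.di);
	ubi_do_get_volume_info(ubi, vol, &nt.vi);
	return blocking_notifier_call_chain(&ubi_notifiers, ntype, &nt);
}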
diff --git a/drivers/mtd/ubi/upd.c b/drivers/mtd/ubi/upd.c
index 6b4d1ae891a..74fdc40c862 100644
--- a/drivers/mtd/ubi/upd.c
+++ b/drivers/mtd/ubi/upd.c
@@ -68,10 +68,10 @@ static int set_update_marker(struct ubi_device *ubi, struct ubi_volume *vol)
sizeof(struct ubi_vtbl_record));
vtbl_rec.upd_marker = 1;
- mutex_lock(&ubi->volumes_mutex);
+ mutex_lock(&ubi->device_mutex);
err = ubi_change_vtbl_record(ubi, vol->vol_id, &vtbl_rec);
- mutex_unlock(&ubi->volumes_mutex);
vol->upd_marker = 1;
+ mutex_unlock(&ubi->device_mutex);
return err;
}
@@ -109,10 +109,10 @@ static int clear_update_marker(struct ubi_device *ubi, struct ubi_volume *vol,
vol->last_eb_bytes = vol->usable_leb_size;
}
- mutex_lock(&ubi->volumes_mutex);
+ mutex_lock(&ubi->device_mutex);
err = ubi_change_vtbl_record(ubi, vol->vol_id, &vtbl_rec);
- mutex_unlock(&ubi->volumes_mutex);
vol->upd_marker = 0;
+ mutex_unlock(&ubi->device_mutex);
return err;
}
diff --git a/drivers/mtd/ubi/vmt.c b/drivers/mtd/ubi/vmt.c
index df5483562b7..ab64cb56df6 100644
--- a/drivers/mtd/ubi/vmt.c
+++ b/drivers/mtd/ubi/vmt.c
@@ -198,7 +198,7 @@ static void volume_sysfs_close(struct ubi_volume *vol)
* %UBI_VOL_NUM_AUTO, this function automatically assign ID to the new volume
* and saves it in @req->vol_id. Returns zero in case of success and a negative
* error code in case of failure. Note, the caller has to have the
- * @ubi->volumes_mutex locked.
+ * @ubi->device_mutex locked.
*/
int ubi_create_volume(struct ubi_device *ubi, struct ubi_mkvol_req *req)
{
@@ -232,8 +232,8 @@ int ubi_create_volume(struct ubi_device *ubi, struct ubi_mkvol_req *req)
req->vol_id = vol_id;
}
- dbg_gen("volume ID %d, %llu bytes, type %d, name %s",
- vol_id, (unsigned long long)req->bytes,
+ dbg_gen("create device %d, volume %d, %llu bytes, type %d, name %s",
+ ubi->ubi_num, vol_id, (unsigned long long)req->bytes,
(int)req->vol_type, req->name);
/* Ensure that this volume does not exist */
@@ -317,10 +317,6 @@ int ubi_create_volume(struct ubi_device *ubi, struct ubi_mkvol_req *req)
goto out_mapping;
}
- err = ubi_create_gluebi(ubi, vol);
- if (err)
- goto out_cdev;
-
vol->dev.release = vol_release;
vol->dev.parent = &ubi->dev;
vol->dev.devt = dev;
@@ -330,7 +326,7 @@ int ubi_create_volume(struct ubi_device *ubi, struct ubi_mkvol_req *req)
err = device_register(&vol->dev);
if (err) {
ubi_err("cannot register device");
- goto out_gluebi;
+ goto out_cdev;
}
err = volume_sysfs_init(ubi, vol);
@@ -358,7 +354,9 @@ int ubi_create_volume(struct ubi_device *ubi, struct ubi_mkvol_req *req)
ubi->vol_count += 1;
spin_unlock(&ubi->volumes_lock);
- err = paranoid_check_volumes(ubi);
+ ubi_volume_notify(ubi, vol, UBI_VOLUME_ADDED);
+ if (paranoid_check_volumes(ubi))
+ dbg_err("check failed while creating volume %d", vol_id);
return err;
out_sysfs:
@@ -373,10 +371,6 @@ out_sysfs:
do_free = 0;
get_device(&vol->dev);
volume_sysfs_close(vol);
-out_gluebi:
- if (ubi_destroy_gluebi(vol))
- dbg_err("cannot destroy gluebi for volume %d:%d",
- ubi->ubi_num, vol_id);
out_cdev:
cdev_del(&vol->cdev);
out_mapping:
@@ -403,7 +397,7 @@ out_unlock:
*
* This function removes volume described by @desc. The volume has to be opened
* in "exclusive" mode. Returns zero in case of success and a negative error
- * code in case of failure. The caller has to have the @ubi->volumes_mutex
+ * code in case of failure. The caller has to have the @ubi->device_mutex
* locked.
*/
int ubi_remove_volume(struct ubi_volume_desc *desc, int no_vtbl)
@@ -412,7 +406,7 @@ int ubi_remove_volume(struct ubi_volume_desc *desc, int no_vtbl)
struct ubi_device *ubi = vol->ubi;
int i, err, vol_id = vol->vol_id, reserved_pebs = vol->reserved_pebs;
- dbg_gen("remove UBI volume %d", vol_id);
+ dbg_gen("remove device %d, volume %d", ubi->ubi_num, vol_id);
ubi_assert(desc->mode == UBI_EXCLUSIVE);
ubi_assert(vol == ubi->volumes[vol_id]);
@@ -431,10 +425,6 @@ int ubi_remove_volume(struct ubi_volume_desc *desc, int no_vtbl)
ubi->volumes[vol_id] = NULL;
spin_unlock(&ubi->volumes_lock);
- err = ubi_destroy_gluebi(vol);
- if (err)
- goto out_err;
-
if (!no_vtbl) {
err = ubi_change_vtbl_record(ubi, vol_id, NULL);
if (err)
@@ -465,8 +455,10 @@ int ubi_remove_volume(struct ubi_volume_desc *desc, int no_vtbl)
ubi->vol_count -= 1;
spin_unlock(&ubi->volumes_lock);
- if (!no_vtbl)
- err = paranoid_check_volumes(ubi);
+ ubi_volume_notify(ubi, vol, UBI_VOLUME_REMOVED);
+ if (!no_vtbl && paranoid_check_volumes(ubi))
+ dbg_err("check failed while removing volume %d", vol_id);
+
return err;
out_err:
@@ -485,7 +477,7 @@ out_unlock:
*
* This function re-sizes the volume and returns zero in case of success, and a
* negative error code in case of failure. The caller has to have the
- * @ubi->volumes_mutex locked.
+ * @ubi->device_mutex locked.
*/
int ubi_resize_volume(struct ubi_volume_desc *desc, int reserved_pebs)
{
@@ -498,8 +490,8 @@ int ubi_resize_volume(struct ubi_volume_desc *desc, int reserved_pebs)
if (ubi->ro_mode)
return -EROFS;
- dbg_gen("re-size volume %d to from %d to %d PEBs",
- vol_id, vol->reserved_pebs, reserved_pebs);
+ dbg_gen("re-size device %d, volume %d to from %d to %d PEBs",
+ ubi->ubi_num, vol_id, vol->reserved_pebs, reserved_pebs);
if (vol->vol_type == UBI_STATIC_VOLUME &&
reserved_pebs < vol->used_ebs) {
@@ -587,7 +579,9 @@ int ubi_resize_volume(struct ubi_volume_desc *desc, int reserved_pebs)
(long long)vol->used_ebs * vol->usable_leb_size;
}
- err = paranoid_check_volumes(ubi);
+ ubi_volume_notify(ubi, vol, UBI_VOLUME_RESIZED);
+ if (paranoid_check_volumes(ubi))
+ dbg_err("check failed while re-sizing volume %d", vol_id);
return err;
out_acc:
@@ -632,11 +626,12 @@ int ubi_rename_volumes(struct ubi_device *ubi, struct list_head *rename_list)
vol->name_len = re->new_name_len;
memcpy(vol->name, re->new_name, re->new_name_len + 1);
spin_unlock(&ubi->volumes_lock);
+ ubi_volume_notify(ubi, vol, UBI_VOLUME_RENAMED);
}
}
- if (!err)
- err = paranoid_check_volumes(ubi);
+ if (!err && paranoid_check_volumes(ubi))
+ dbg_err("check failed while re-naming volumes");
return err;
}
@@ -667,10 +662,6 @@ int ubi_add_volume(struct ubi_device *ubi, struct ubi_volume *vol)
return err;
}
- err = ubi_create_gluebi(ubi, vol);
- if (err)
- goto out_cdev;
-
vol->dev.release = vol_release;
vol->dev.parent = &ubi->dev;
vol->dev.devt = dev;
@@ -678,21 +669,19 @@ int ubi_add_volume(struct ubi_device *ubi, struct ubi_volume *vol)
dev_set_name(&vol->dev, "%s_%d", ubi->ubi_name, vol->vol_id);
err = device_register(&vol->dev);
if (err)
- goto out_gluebi;
+ goto out_cdev;
err = volume_sysfs_init(ubi, vol);
if (err) {
cdev_del(&vol->cdev);
- err = ubi_destroy_gluebi(vol);
volume_sysfs_close(vol);
return err;
}
- err = paranoid_check_volumes(ubi);
+ if (paranoid_check_volumes(ubi))
+ dbg_err("check failed while adding volume %d", vol_id);
return err;
-out_gluebi:
- err = ubi_destroy_gluebi(vol);
out_cdev:
cdev_del(&vol->cdev);
return err;
@@ -708,12 +697,9 @@ out_cdev:
*/
void ubi_free_volume(struct ubi_device *ubi, struct ubi_volume *vol)
{
- int err;
-
dbg_gen("free volume %d", vol->vol_id);
ubi->volumes[vol->vol_id] = NULL;
- err = ubi_destroy_gluebi(vol);
cdev_del(&vol->cdev);
volume_sysfs_close(vol);
}
@@ -868,6 +854,7 @@ fail:
if (vol)
ubi_dbg_dump_vol_info(vol);
ubi_dbg_dump_vtbl_record(&ubi->vtbl[vol_id], vol_id);
+ dump_stack();
spin_unlock(&ubi->volumes_lock);
return -EINVAL;
}
diff --git a/drivers/mtd/ubi/wl.c b/drivers/mtd/ubi/wl.c
index 891534f8210..600c7229d5c 100644
--- a/drivers/mtd/ubi/wl.c
+++ b/drivers/mtd/ubi/wl.c
@@ -55,8 +55,8 @@
*
* As it was said, for the UBI sub-system all physical eraseblocks are either
* "free" or "used". Free eraseblock are kept in the @wl->free RB-tree, while
- * used eraseblocks are kept in @wl->used or @wl->scrub RB-trees, or
- * (temporarily) in the @wl->pq queue.
+ * used eraseblocks are kept in @wl->used, @wl->erroneous, or @wl->scrub
+ * RB-trees, as well as (temporarily) in the @wl->pq queue.
*
* When the WL sub-system returns a physical eraseblock, the physical
* eraseblock is protected from being moved for some "time". For this reason,
@@ -83,6 +83,8 @@
* used. The former state corresponds to the @wl->free tree. The latter state
* is split up on several sub-states:
* o the WL movement is allowed (@wl->used tree);
+ * o the WL movement is disallowed (@wl->erroneous) because the PEB is
+ * erroneous - e.g., there was a read error;
* o the WL movement is temporarily prohibited (@wl->pq queue);
* o scrubbing is needed (@wl->scrub tree).
*
@@ -457,6 +459,14 @@ retry:
dbg_wl("PEB %d EC %d", e->pnum, e->ec);
prot_queue_add(ubi, e);
spin_unlock(&ubi->wl_lock);
+
+ err = ubi_dbg_check_all_ff(ubi, e->pnum, ubi->vid_hdr_aloffset,
+ ubi->peb_size - ubi->vid_hdr_aloffset);
+ if (err) {
+ ubi_err("new PEB %d does not contain all 0xFF bytes", e->pnum);
+ return err > 0 ? -EINVAL : err;
+ }
+
return e->pnum;
}
@@ -653,7 +663,8 @@ static int schedule_erase(struct ubi_device *ubi, struct ubi_wl_entry *e,
static int wear_leveling_worker(struct ubi_device *ubi, struct ubi_work *wrk,
int cancel)
{
- int err, scrubbing = 0, torture = 0;
+ int err, scrubbing = 0, torture = 0, protect = 0, erroneous = 0;
+ int vol_id = -1, uninitialized_var(lnum);
struct ubi_wl_entry *e1, *e2;
struct ubi_vid_hdr *vid_hdr;
@@ -738,68 +749,78 @@ static int wear_leveling_worker(struct ubi_device *ubi, struct ubi_work *wrk,
/*
* We are trying to move PEB without a VID header. UBI
* always write VID headers shortly after the PEB was
- * given, so we have a situation when it did not have
- * chance to write it down because it was preempted.
- * Just re-schedule the work, so that next time it will
- * likely have the VID header in place.
+ * given, so we have a situation where the VID header
+ * has not yet been written because the task was
+ * preempted. So add this PEB to the protection queue
+ * for now; presumably more data (including the missing
+ * VID header) will be written to it soon, and then
+ * we'll move it.
*/
dbg_wl("PEB %d has no VID header", e1->pnum);
+ protect = 1;
goto out_not_moved;
}
ubi_err("error %d while reading VID header from PEB %d",
err, e1->pnum);
- if (err > 0)
- err = -EIO;
goto out_error;
}
+ vol_id = be32_to_cpu(vid_hdr->vol_id);
+ lnum = be32_to_cpu(vid_hdr->lnum);
+
err = ubi_eba_copy_leb(ubi, e1->pnum, e2->pnum, vid_hdr);
if (err) {
- if (err == -EAGAIN)
+ if (err == MOVE_CANCEL_RACE) {
+ /*
+ * The LEB has not been moved because the volume is
+ * being deleted or the PEB has been put meanwhile. We
+ * should prevent this PEB from being selected for
+ * wear-leveling movement again, so put it to the
+ * protection queue.
+ */
+ protect = 1;
goto out_not_moved;
- if (err < 0)
- goto out_error;
- if (err == 2) {
- /* Target PEB write error, torture it */
+ }
+
+ if (err == MOVE_CANCEL_BITFLIPS || err == MOVE_TARGET_WR_ERR ||
+ err == MOVE_TARGET_RD_ERR) {
+ /*
+ * Target PEB had bit-flips or a read/write error - torture it.
+ */
torture = 1;
goto out_not_moved;
}
- /*
- * The LEB has not been moved because the volume is being
- * deleted or the PEB has been put meanwhile. We should prevent
- * this PEB from being selected for wear-leveling movement
- * again, so put it to the protection queue.
- */
-
- dbg_wl("canceled moving PEB %d", e1->pnum);
- ubi_assert(err == 1);
-
- ubi_free_vid_hdr(ubi, vid_hdr);
- vid_hdr = NULL;
-
- spin_lock(&ubi->wl_lock);
- prot_queue_add(ubi, e1);
- ubi_assert(!ubi->move_to_put);
- ubi->move_from = ubi->move_to = NULL;
- ubi->wl_scheduled = 0;
- spin_unlock(&ubi->wl_lock);
+ if (err == MOVE_SOURCE_RD_ERR) {
+ /*
+ * An error happened while reading the source PEB. Do
+ * not switch to R/O mode in this case, and give the
+ * upper layers a possibility to recover from this,
+ * e.g. by unmapping corresponding LEB. Instead, just
+ * put this PEB to the @ubi->erroneous list to prevent
+ * UBI from trying to move it over and over again.
+ */
+ if (ubi->erroneous_peb_count > ubi->max_erroneous) {
+ ubi_err("too many erroneous eraseblocks (%d)",
+ ubi->erroneous_peb_count);
+ goto out_error;
+ }
+ erroneous = 1;
+ goto out_not_moved;
+ }
- e1 = NULL;
- err = schedule_erase(ubi, e2, 0);
- if (err)
+ if (err < 0)
goto out_error;
- mutex_unlock(&ubi->move_mutex);
- return 0;
+
+ ubi_assert(0);
}
/* The PEB has been successfully moved */
- ubi_free_vid_hdr(ubi, vid_hdr);
- vid_hdr = NULL;
if (scrubbing)
- ubi_msg("scrubbed PEB %d, data moved to PEB %d",
- e1->pnum, e2->pnum);
+ ubi_msg("scrubbed PEB %d (LEB %d:%d), data moved to PEB %d",
+ e1->pnum, vol_id, lnum, e2->pnum);
+ ubi_free_vid_hdr(ubi, vid_hdr);
spin_lock(&ubi->wl_lock);
if (!ubi->move_to_put) {
@@ -812,8 +833,10 @@ static int wear_leveling_worker(struct ubi_device *ubi, struct ubi_work *wrk,
err = schedule_erase(ubi, e1, 0);
if (err) {
- e1 = NULL;
- goto out_error;
+ kmem_cache_free(ubi_wl_entry_slab, e1);
+ if (e2)
+ kmem_cache_free(ubi_wl_entry_slab, e2);
+ goto out_ro;
}
if (e2) {
@@ -821,10 +844,13 @@ static int wear_leveling_worker(struct ubi_device *ubi, struct ubi_work *wrk,
* Well, the target PEB was put meanwhile, schedule it for
* erasure.
*/
- dbg_wl("PEB %d was put meanwhile, erase", e2->pnum);
+ dbg_wl("PEB %d (LEB %d:%d) was put meanwhile, erase",
+ e2->pnum, vol_id, lnum);
err = schedule_erase(ubi, e2, 0);
- if (err)
- goto out_error;
+ if (err) {
+ kmem_cache_free(ubi_wl_entry_slab, e2);
+ goto out_ro;
+ }
}
dbg_wl("done");
@@ -837,11 +863,19 @@ static int wear_leveling_worker(struct ubi_device *ubi, struct ubi_work *wrk,
* have been changed, schedule it for erasure.
*/
out_not_moved:
- dbg_wl("canceled moving PEB %d", e1->pnum);
- ubi_free_vid_hdr(ubi, vid_hdr);
- vid_hdr = NULL;
+ if (vol_id != -1)
+ dbg_wl("cancel moving PEB %d (LEB %d:%d) to PEB %d (%d)",
+ e1->pnum, vol_id, lnum, e2->pnum, err);
+ else
+ dbg_wl("cancel moving PEB %d to PEB %d (%d)",
+ e1->pnum, e2->pnum, err);
spin_lock(&ubi->wl_lock);
- if (scrubbing)
+ if (protect)
+ prot_queue_add(ubi, e1);
+ else if (erroneous) {
+ wl_tree_add(e1, &ubi->erroneous);
+ ubi->erroneous_peb_count += 1;
+ } else if (scrubbing)
wl_tree_add(e1, &ubi->scrub);
else
wl_tree_add(e1, &ubi->used);
@@ -850,32 +884,36 @@ out_not_moved:
ubi->wl_scheduled = 0;
spin_unlock(&ubi->wl_lock);
- e1 = NULL;
+ ubi_free_vid_hdr(ubi, vid_hdr);
err = schedule_erase(ubi, e2, torture);
- if (err)
- goto out_error;
-
+ if (err) {
+ kmem_cache_free(ubi_wl_entry_slab, e2);
+ goto out_ro;
+ }
mutex_unlock(&ubi->move_mutex);
return 0;
out_error:
- ubi_err("error %d while moving PEB %d to PEB %d",
- err, e1->pnum, e2->pnum);
-
- ubi_free_vid_hdr(ubi, vid_hdr);
+ if (vol_id != -1)
+ ubi_err("error %d while moving PEB %d (LEB %d:%d) to PEB %d",
+ err, e1->pnum, vol_id, lnum, e2->pnum);
+ else
+ ubi_err("error %d while moving PEB %d to PEB %d",
+ err, e1->pnum, e2->pnum);
spin_lock(&ubi->wl_lock);
ubi->move_from = ubi->move_to = NULL;
ubi->move_to_put = ubi->wl_scheduled = 0;
spin_unlock(&ubi->wl_lock);
- if (e1)
- kmem_cache_free(ubi_wl_entry_slab, e1);
- if (e2)
- kmem_cache_free(ubi_wl_entry_slab, e2);
- ubi_ro_mode(ubi);
+ ubi_free_vid_hdr(ubi, vid_hdr);
+ kmem_cache_free(ubi_wl_entry_slab, e1);
+ kmem_cache_free(ubi_wl_entry_slab, e2);
+out_ro:
+ ubi_ro_mode(ubi);
mutex_unlock(&ubi->move_mutex);
- return err;
+ ubi_assert(err != 0);
+ return err < 0 ? err : -EIO;
out_cancel:
ubi->wl_scheduled = 0;
@@ -1015,7 +1053,7 @@ static int erase_worker(struct ubi_device *ubi, struct ubi_work *wl_wrk,
/*
* If this is not %-EIO, we have no idea what to do. Scheduling
* this physical eraseblock for erasure again would cause
- * errors again and again. Well, lets switch to RO mode.
+ * errors again and again. Well, let's switch to R/O mode.
*/
goto out_ro;
}
@@ -1043,10 +1081,9 @@ static int erase_worker(struct ubi_device *ubi, struct ubi_work *wl_wrk,
ubi_err("no reserved physical eraseblocks");
goto out_ro;
}
-
spin_unlock(&ubi->volumes_lock);
- ubi_msg("mark PEB %d as bad", pnum);
+ ubi_msg("mark PEB %d as bad", pnum);
err = ubi_io_mark_bad(ubi, pnum);
if (err)
goto out_ro;
@@ -1056,7 +1093,9 @@ static int erase_worker(struct ubi_device *ubi, struct ubi_work *wl_wrk,
ubi->bad_peb_count += 1;
ubi->good_peb_count -= 1;
ubi_calculate_reserved(ubi);
- if (ubi->beb_rsvd_pebs == 0)
+ if (ubi->beb_rsvd_pebs)
+ ubi_msg("%d PEBs left in the reserve", ubi->beb_rsvd_pebs);
+ else
ubi_warn("last PEB from the reserved pool was used");
spin_unlock(&ubi->volumes_lock);
@@ -1125,6 +1164,13 @@ retry:
} else if (in_wl_tree(e, &ubi->scrub)) {
paranoid_check_in_wl_tree(e, &ubi->scrub);
rb_erase(&e->u.rb, &ubi->scrub);
+ } else if (in_wl_tree(e, &ubi->erroneous)) {
+ paranoid_check_in_wl_tree(e, &ubi->erroneous);
+ rb_erase(&e->u.rb, &ubi->erroneous);
+ ubi->erroneous_peb_count -= 1;
+ ubi_assert(ubi->erroneous_peb_count >= 0);
+ /* Erroneous PEBs should be tortured */
+ torture = 1;
} else {
err = prot_queue_del(ubi, e->pnum);
if (err) {
@@ -1373,7 +1419,7 @@ int ubi_wl_init_scan(struct ubi_device *ubi, struct ubi_scan_info *si)
struct ubi_scan_leb *seb, *tmp;
struct ubi_wl_entry *e;
- ubi->used = ubi->free = ubi->scrub = RB_ROOT;
+ ubi->used = ubi->erroneous = ubi->free = ubi->scrub = RB_ROOT;
spin_lock_init(&ubi->wl_lock);
mutex_init(&ubi->move_mutex);
init_rwsem(&ubi->work_sem);
@@ -1511,6 +1557,7 @@ void ubi_wl_close(struct ubi_device *ubi)
cancel_pending(ubi);
protection_queue_destroy(ubi);
tree_destroy(&ubi->used);
+ tree_destroy(&ubi->erroneous);
tree_destroy(&ubi->free);
tree_destroy(&ubi->scrub);
kfree(ubi->lookuptbl);