path: root/fs
author	Linus Torvalds <torvalds@linux-foundation.org>	2020-06-01 12:07:34 -0700
committer	Linus Torvalds <torvalds@linux-foundation.org>	2020-06-01 12:07:34 -0700
commit	829f3b9401fe7cc3c1f3642bb2520751a42a87df (patch)
tree	55182cd94384d7c26adb398954736d338733b1e0 /fs
parent	81e8c10dac62c427b25f6bbb07936806e4dd9e8a (diff)
parent	78c08247b9d3e03192f8b359aa079024e805a948 (diff)
Merge tag 'pstore-v5.8-rc1' of git://git.kernel.org/pub/scm/linux/kernel/git/kees/linux
Pull pstore updates from Kees Cook:
 "Fixes and new features for pstore. This is a pretty big set of
  changes (relative to past pstore pulls), but it has been in -next for
  a while. The biggest change here is the ability to support a block
  device as a pstore backend, which has been desired for a while. A lot
  of additional fixes and refactorings are also included, mostly in
  support of the new features.

   - refactor pstore locking for safer module unloading (Kees Cook)

   - remove orphaned records from pstorefs when backend unloaded (Kees Cook)

   - refactor dump_oops parameter into max_reason (Pavel Tatashin)

   - introduce pstore/zone for common code for contiguous storage (WeiXiong Liao)

   - introduce pstore/blk for block device backend (WeiXiong Liao)

   - introduce mtd backend (WeiXiong Liao)"

* tag 'pstore-v5.8-rc1' of git://git.kernel.org/pub/scm/linux/kernel/git/kees/linux: (35 commits)
  mtd: Support kmsg dumper based on pstore/blk
  pstore/blk: Introduce "best_effort" mode
  pstore/blk: Support non-block storage devices
  pstore/blk: Provide way to query pstore configuration
  pstore/zone: Provide way to skip "broken" zone for MTD devices
  Documentation: Add details for pstore/blk
  pstore/zone,blk: Add ftrace frontend support
  pstore/zone,blk: Add console frontend support
  pstore/zone,blk: Add support for pmsg frontend
  pstore/blk: Introduce backend for block devices
  pstore/zone: Introduce common layer to manage storage zones
  ramoops: Add "max-reason" optional field to ramoops DT node
  pstore/ram: Introduce max_reason and convert dump_oops
  pstore/platform: Pass max_reason to kmesg dump
  printk: Introduce kmsg_dump_reason_str()
  printk: honor the max_reason field in kmsg_dumper
  printk: Collapse shutdown types into a single dump reason
  pstore/ftrace: Provide ftrace log merging routine
  pstore/ram: Refactor ftrace buffer merging
  pstore/ram: Refactor DT size parsing
  ...
Diffstat (limited to 'fs')
-rw-r--r--	fs/pstore/Kconfig	109
-rw-r--r--	fs/pstore/Makefile	6
-rw-r--r--	fs/pstore/blk.c	517
-rw-r--r--	fs/pstore/ftrace.c	54
-rw-r--r--	fs/pstore/inode.c	129
-rw-r--r--	fs/pstore/internal.h	11
-rw-r--r--	fs/pstore/platform.c	117
-rw-r--r--	fs/pstore/ram.c	155
-rw-r--r--	fs/pstore/zone.c	1465
9 files changed, 2380 insertions, 183 deletions
diff --git a/fs/pstore/Kconfig b/fs/pstore/Kconfig
index 8f0369aad22a..e16a49ebfe54 100644
--- a/fs/pstore/Kconfig
+++ b/fs/pstore/Kconfig
@@ -153,3 +153,112 @@ config PSTORE_RAM
"ramoops.ko".
For more information, see Documentation/admin-guide/ramoops.rst.
+
+config PSTORE_ZONE
+ tristate
+ depends on PSTORE
+ help
+ The common layer for pstore/blk (and pstore/ram in the future)
+ to manage storage in zones.
+
+config PSTORE_BLK
+ tristate "Log panic/oops to a block device"
+ depends on PSTORE
+ depends on BLOCK
+ select PSTORE_ZONE
+ default n
+ help
+ This enables panic and oops messages to be logged to a block
+ device where they can be read back at some later point.
+
+ For more information, see Documentation/admin-guide/pstore-blk.rst
+
+ If unsure, say N.
+
+config PSTORE_BLK_BLKDEV
+ string "block device identifier"
+ depends on PSTORE_BLK
+ default ""
+ help
+ Which block device should be used for pstore/blk.
+
+ It accepts the following variants:
+ 1) <hex_major><hex_minor> device number in hexadecimal representation,
+ with no leading 0x, for example b302.
+ 2) /dev/<disk_name> represents the device number of the disk
+ 3) /dev/<disk_name><decimal> represents the device number
+    of partition - device number of disk plus the partition number
+ 4) /dev/<disk_name>p<decimal> - same as the above, this form is
+ used when disk name of partitioned disk ends with a digit.
+ 5) PARTUUID=00112233-4455-6677-8899-AABBCCDDEEFF representing the
+ unique id of a partition if the partition table provides it.
+ The UUID may be either an EFI/GPT UUID, or refer to an MSDOS
+ partition using the format SSSSSSSS-PP, where SSSSSSSS is a zero-
+ filled hex representation of the 32-bit "NT disk signature", and PP
+ is a zero-filled hex representation of the 1-based partition number.
+ 6) PARTUUID=<UUID>/PARTNROFF=<int> to select a partition in relation
+ to a partition with a known unique id.
+ 7) <major>:<minor> major and minor number of the device separated by
+ a colon.
+
+ NOTE that both Kconfig and module parameters can configure
+ pstore/blk, but module parameters take priority over Kconfig.
+
+config PSTORE_BLK_KMSG_SIZE
+ int "Size in Kbytes of kmsg dump log to store"
+ depends on PSTORE_BLK
+ default 64
+ help
+ This sets the size of the kmsg dump (oops, panic, etc.) log for
+ pstore/blk. The size is in KB and must be a multiple of 4.
+
+ NOTE that both Kconfig and module parameters can configure
+ pstore/blk, but module parameters take priority over Kconfig.
+
+config PSTORE_BLK_MAX_REASON
+ int "Maximum kmsg dump reason to store"
+ depends on PSTORE_BLK
+ default 2
+ help
+ The maximum reason for kmsg dumps to store. The default is
+ 2 (KMSG_DUMP_OOPS), see include/linux/kmsg_dump.h's
+ enum kmsg_dump_reason for more details.
+
+ NOTE that both Kconfig and module parameters can configure
+ pstore/blk, but module parameters take priority over Kconfig.
+
+config PSTORE_BLK_PMSG_SIZE
+ int "Size in Kbytes of pmsg to store"
+ depends on PSTORE_BLK
+ depends on PSTORE_PMSG
+ default 64
+ help
+ This sets the size of pmsg (pmsg_size) for pstore/blk. The size is
+ in KB and must be a multiple of 4.
+
+ NOTE that both Kconfig and module parameters can configure
+ pstore/blk, but module parameters take priority over Kconfig.
+
+config PSTORE_BLK_CONSOLE_SIZE
+ int "Size in Kbytes of console log to store"
+ depends on PSTORE_BLK
+ depends on PSTORE_CONSOLE
+ default 64
+ help
+ This sets the size of the console log (console_size) to store via
+ pstore/blk. The size is in KB and must be a multiple of 4.
+
+ NOTE that both Kconfig and module parameters can configure
+ pstore/blk, but module parameters take priority over Kconfig.
+
+config PSTORE_BLK_FTRACE_SIZE
+ int "Size in Kbytes of ftrace log to store"
+ depends on PSTORE_BLK
+ depends on PSTORE_FTRACE
+ default 64
+ help
+ This sets the size of the ftrace log (ftrace_size) for pstore/blk.
+ The size is in KB and must be a multiple of 4.
+
+ NOTE that both Kconfig and module parameters can configure
+ pstore/blk, but module parameters take priority over Kconfig.
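As the help texts above note, module parameters override these Kconfig
defaults. For example, a hypothetical boot-time configuration on the
kernel command line (the device path is illustrative only):

  pstore_blk.blkdev=/dev/mmcblk0p7 pstore_blk.kmsg_size=128 pstore_blk.best_effort=Y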
diff --git a/fs/pstore/Makefile b/fs/pstore/Makefile
index 967b5891f325..c270467aeece 100644
--- a/fs/pstore/Makefile
+++ b/fs/pstore/Makefile
@@ -12,3 +12,9 @@ pstore-$(CONFIG_PSTORE_PMSG) += pmsg.o
ramoops-objs += ram.o ram_core.o
obj-$(CONFIG_PSTORE_RAM) += ramoops.o
+
+pstore_zone-objs += zone.o
+obj-$(CONFIG_PSTORE_ZONE) += pstore_zone.o
+
+pstore_blk-objs += blk.o
+obj-$(CONFIG_PSTORE_BLK) += pstore_blk.o
diff --git a/fs/pstore/blk.c b/fs/pstore/blk.c
new file mode 100644
index 000000000000..fcd5563dde06
--- /dev/null
+++ b/fs/pstore/blk.c
@@ -0,0 +1,517 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Implements a pstore backend driver that writes to block (or non-block)
+ * storage devices, using the pstore/zone API.
+ */
+
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include "../../block/blk.h"
+#include <linux/blkdev.h>
+#include <linux/string.h>
+#include <linux/of.h>
+#include <linux/of_address.h>
+#include <linux/platform_device.h>
+#include <linux/pstore_blk.h>
+#include <linux/mount.h>
+#include <linux/uio.h>
+
+static long kmsg_size = CONFIG_PSTORE_BLK_KMSG_SIZE;
+module_param(kmsg_size, long, 0400);
+MODULE_PARM_DESC(kmsg_size, "kmsg dump record size in kbytes");
+
+static int max_reason = CONFIG_PSTORE_BLK_MAX_REASON;
+module_param(max_reason, int, 0400);
+MODULE_PARM_DESC(max_reason,
+ "maximum reason for kmsg dump (default 2: Oops and Panic)");
+
+#if IS_ENABLED(CONFIG_PSTORE_PMSG)
+static long pmsg_size = CONFIG_PSTORE_BLK_PMSG_SIZE;
+#else
+static long pmsg_size = -1;
+#endif
+module_param(pmsg_size, long, 0400);
+MODULE_PARM_DESC(pmsg_size, "pmsg size in kbytes");
+
+#if IS_ENABLED(CONFIG_PSTORE_CONSOLE)
+static long console_size = CONFIG_PSTORE_BLK_CONSOLE_SIZE;
+#else
+static long console_size = -1;
+#endif
+module_param(console_size, long, 0400);
+MODULE_PARM_DESC(console_size, "console size in kbytes");
+
+#if IS_ENABLED(CONFIG_PSTORE_FTRACE)
+static long ftrace_size = CONFIG_PSTORE_BLK_FTRACE_SIZE;
+#else
+static long ftrace_size = -1;
+#endif
+module_param(ftrace_size, long, 0400);
+MODULE_PARM_DESC(ftrace_size, "ftrace size in kbytes");
+
+static bool best_effort;
+module_param(best_effort, bool, 0400);
+MODULE_PARM_DESC(best_effort, "use best effort to write (i.e. do not require storage driver pstore support, default: off)");
+
+/*
+ * blkdev - the block device to use for pstore storage
+ *
+ * Usually, this will be a partition of a block device.
+ *
+ * blkdev accepts the following variants:
+ * 1) <hex_major><hex_minor> device number in hexadecimal representation,
+ * with no leading 0x, for example b302.
+ * 2) /dev/<disk_name> represents the device number of disk
+ * 3) /dev/<disk_name><decimal> represents the device number
+ * of partition - device number of disk plus the partition number
+ * 4) /dev/<disk_name>p<decimal> - same as the above, that form is
+ * used when disk name of partitioned disk ends on a digit.
+ * 5) PARTUUID=00112233-4455-6677-8899-AABBCCDDEEFF representing the
+ * unique id of a partition if the partition table provides it.
+ * The UUID may be either an EFI/GPT UUID, or refer to an MSDOS
+ * partition using the format SSSSSSSS-PP, where SSSSSSSS is a zero-
+ * filled hex representation of the 32-bit "NT disk signature", and PP
+ * is a zero-filled hex representation of the 1-based partition number.
+ * 6) PARTUUID=<UUID>/PARTNROFF=<int> to select a partition in relation to
+ * a partition with a known unique id.
+ * 7) <major>:<minor> major and minor number of the device separated by
+ * a colon.
+ */
+static char blkdev[80] = CONFIG_PSTORE_BLK_BLKDEV;
+module_param_string(blkdev, blkdev, 80, 0400);
+MODULE_PARM_DESC(blkdev, "block device for pstore storage");
+
+/*
+ * All globals must only be accessed under the pstore_blk_lock
+ * during the register/unregister functions.
+ */
+static DEFINE_MUTEX(pstore_blk_lock);
+static struct block_device *psblk_bdev;
+static struct pstore_zone_info *pstore_zone_info;
+static pstore_blk_panic_write_op blkdev_panic_write;
+
+struct bdev_info {
+ dev_t devt;
+ sector_t nr_sects;
+ sector_t start_sect;
+};
+
+#define check_size(name, alignsize) ({ \
+ long _##name_ = (name); \
+ _##name_ = _##name_ <= 0 ? 0 : (_##name_ * 1024); \
+ if (_##name_ & ((alignsize) - 1)) { \
+ pr_info(#name " must align to %d\n", \
+ (alignsize)); \
+ _##name_ = ALIGN(_##name_, (alignsize)); \
+ } \
+ _##name_; \
+})
+
+static int __register_pstore_device(struct pstore_device_info *dev)
+{
+ int ret;
+
+ lockdep_assert_held(&pstore_blk_lock);
+
+ if (!dev || !dev->total_size || !dev->read || !dev->write)
+ return -EINVAL;
+
+ /* someone already registered before */
+ if (pstore_zone_info)
+ return -EBUSY;
+
+ pstore_zone_info = kzalloc(sizeof(struct pstore_zone_info), GFP_KERNEL);
+ if (!pstore_zone_info)
+ return -ENOMEM;
+
+ /* zero means no limit on which frontends to attempt to store. */
+ if (!dev->flags)
+ dev->flags = UINT_MAX;
+
+#define verify_size(name, alignsize, enabled) { \
+ long _##name_; \
+ if (enabled) \
+ _##name_ = check_size(name, alignsize); \
+ else \
+ _##name_ = 0; \
+ name = _##name_ / 1024; \
+ pstore_zone_info->name = _##name_; \
+ }
+
+ verify_size(kmsg_size, 4096, dev->flags & PSTORE_FLAGS_DMESG);
+ verify_size(pmsg_size, 4096, dev->flags & PSTORE_FLAGS_PMSG);
+ verify_size(console_size, 4096, dev->flags & PSTORE_FLAGS_CONSOLE);
+ verify_size(ftrace_size, 4096, dev->flags & PSTORE_FLAGS_FTRACE);
+#undef verify_size
+
+ pstore_zone_info->total_size = dev->total_size;
+ pstore_zone_info->max_reason = max_reason;
+ pstore_zone_info->read = dev->read;
+ pstore_zone_info->write = dev->write;
+ pstore_zone_info->erase = dev->erase;
+ pstore_zone_info->panic_write = dev->panic_write;
+ pstore_zone_info->name = KBUILD_MODNAME;
+ pstore_zone_info->owner = THIS_MODULE;
+
+ ret = register_pstore_zone(pstore_zone_info);
+ if (ret) {
+ kfree(pstore_zone_info);
+ pstore_zone_info = NULL;
+ }
+ return ret;
+}
+/**
+ * register_pstore_device() - register non-block device to pstore/blk
+ *
+ * @dev: non-block device information
+ *
+ * Return:
+ * * 0 - OK
+ * Others - something went wrong.
+ */
+int register_pstore_device(struct pstore_device_info *dev)
+{
+ int ret;
+
+ mutex_lock(&pstore_blk_lock);
+ ret = __register_pstore_device(dev);
+ mutex_unlock(&pstore_blk_lock);
+
+ return ret;
+}
+EXPORT_SYMBOL_GPL(register_pstore_device);
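For a non-block storage driver, registration reduces to providing
total_size plus read/write callbacks, as validated by
__register_pstore_device() above. A minimal sketch (the myraw_* names
and the size are hypothetical, not part of this series):

  #include <linux/module.h>
  #include <linux/pstore_blk.h>

  /* Hypothetical backing store of 1 MiB. */
  #define MYRAW_TOTAL_SIZE	(1024 * 1024)

  static ssize_t myraw_read(char *buf, size_t bytes, loff_t pos)
  {
  	/* Copy @bytes from device offset @pos into @buf. */
  	return bytes;
  }

  static ssize_t myraw_write(const char *buf, size_t bytes, loff_t pos)
  {
  	/* Persist @bytes from @buf at device offset @pos. */
  	return bytes;
  }

  static struct pstore_device_info myraw_dev = {
  	.total_size	= MYRAW_TOTAL_SIZE,
  	.flags		= 0,	/* zero selects all frontends (see above) */
  	.read		= myraw_read,
  	.write		= myraw_write,
  	/* .erase and .panic_write are optional */
  };

  static int __init myraw_init(void)
  {
  	return register_pstore_device(&myraw_dev);
  }
  module_init(myraw_init);

  static void __exit myraw_exit(void)
  {
  	unregister_pstore_device(&myraw_dev);
  }
  module_exit(myraw_exit);

  MODULE_LICENSE("GPL");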
+
+static void __unregister_pstore_device(struct pstore_device_info *dev)
+{
+ lockdep_assert_held(&pstore_blk_lock);
+ if (pstore_zone_info && pstore_zone_info->read == dev->read) {
+ unregister_pstore_zone(pstore_zone_info);
+ kfree(pstore_zone_info);
+ pstore_zone_info = NULL;
+ }
+}
+
+/**
+ * unregister_pstore_device() - unregister non-block device from pstore/blk
+ *
+ * @dev: non-block device information
+ */
+void unregister_pstore_device(struct pstore_device_info *dev)
+{
+ mutex_lock(&pstore_blk_lock);
+ __unregister_pstore_device(dev);
+ mutex_unlock(&pstore_blk_lock);
+}
+EXPORT_SYMBOL_GPL(unregister_pstore_device);
+
+/**
+ * psblk_get_bdev() - open block device
+ *
+ * @holder: Exclusive holder identifier
+ * @info: Information about bdev to fill in
+ *
+ * Return: pointer to the block device on success, or an error pointer on failure.
+ *
+ * On success, the returned block_device has a reference count of one.
+ */
+static struct block_device *psblk_get_bdev(void *holder,
+ struct bdev_info *info)
+{
+ struct block_device *bdev = ERR_PTR(-ENODEV);
+ fmode_t mode = FMODE_READ | FMODE_WRITE;
+ sector_t nr_sects;
+
+ lockdep_assert_held(&pstore_blk_lock);
+
+ if (pstore_zone_info)
+ return ERR_PTR(-EBUSY);
+
+ if (!blkdev[0])
+ return ERR_PTR(-ENODEV);
+
+ if (holder)
+ mode |= FMODE_EXCL;
+ bdev = blkdev_get_by_path(blkdev, mode, holder);
+ if (IS_ERR(bdev)) {
+ dev_t devt;
+
+ devt = name_to_dev_t(blkdev);
+ if (devt == 0)
+ return ERR_PTR(-ENODEV);
+ bdev = blkdev_get_by_dev(devt, mode, holder);
+ if (IS_ERR(bdev))
+ return bdev;
+ }
+
+ nr_sects = part_nr_sects_read(bdev->bd_part);
+ if (!nr_sects) {
+ pr_err("not enough space for '%s'\n", blkdev);
+ blkdev_put(bdev, mode);
+ return ERR_PTR(-ENOSPC);
+ }
+
+ if (info) {
+ info->devt = bdev->bd_dev;
+ info->nr_sects = nr_sects;
+ info->start_sect = get_start_sect(bdev);
+ }
+
+ return bdev;
+}
+
+static void psblk_put_bdev(struct block_device *bdev, void *holder)
+{
+ fmode_t mode = FMODE_READ | FMODE_WRITE;
+
+ lockdep_assert_held(&pstore_blk_lock);
+
+ if (!bdev)
+ return;
+
+ if (holder)
+ mode |= FMODE_EXCL;
+ blkdev_put(bdev, mode);
+}
+
+static ssize_t psblk_generic_blk_read(char *buf, size_t bytes, loff_t pos)
+{
+ struct block_device *bdev = psblk_bdev;
+ struct file file;
+ struct kiocb kiocb;
+ struct iov_iter iter;
+ struct kvec iov = {.iov_base = buf, .iov_len = bytes};
+
+ if (!bdev)
+ return -ENODEV;
+
+ memset(&file, 0, sizeof(struct file));
+ file.f_mapping = bdev->bd_inode->i_mapping;
+ file.f_flags = O_DSYNC | __O_SYNC | O_NOATIME;
+ file.f_inode = bdev->bd_inode;
+ file_ra_state_init(&file.f_ra, file.f_mapping);
+
+ init_sync_kiocb(&kiocb, &file);
+ kiocb.ki_pos = pos;
+ iov_iter_kvec(&iter, READ, &iov, 1, bytes);
+
+ return generic_file_read_iter(&kiocb, &iter);
+}
+
+static ssize_t psblk_generic_blk_write(const char *buf, size_t bytes,
+ loff_t pos)
+{
+ struct block_device *bdev = psblk_bdev;
+ struct iov_iter iter;
+ struct kiocb kiocb;
+ struct file file;
+ ssize_t ret;
+ struct kvec iov = {.iov_base = (void *)buf, .iov_len = bytes};
+
+ if (!bdev)
+ return -ENODEV;
+
+ /* Console/ftrace may write from atomic context; defer so dirty zones are flushed later */
+ if (in_interrupt() || irqs_disabled())
+ return -EBUSY;
+
+ memset(&file, 0, sizeof(struct file));
+ file.f_mapping = bdev->bd_inode->i_mapping;
+ file.f_flags = O_DSYNC | __O_SYNC | O_NOATIME;
+ file.f_inode = bdev->bd_inode;
+
+ init_sync_kiocb(&kiocb, &file);
+ kiocb.ki_pos = pos;
+ iov_iter_kvec(&iter, WRITE, &iov, 1, bytes);
+
+ inode_lock(bdev->bd_inode);
+ ret = generic_write_checks(&kiocb, &iter);
+ if (ret > 0)
+ ret = generic_perform_write(&file, &iter, pos);
+ inode_unlock(bdev->bd_inode);
+
+ if (likely(ret > 0)) {
+ const struct file_operations f_op = {.fsync = blkdev_fsync};
+
+ file.f_op = &f_op;
+ kiocb.ki_pos += ret;
+ ret = generic_write_sync(&kiocb, ret);
+ }
+ return ret;
+}
+
+static ssize_t psblk_blk_panic_write(const char *buf, size_t size,
+ loff_t off)
+{
+ int ret;
+
+ if (!blkdev_panic_write)
+ return -EOPNOTSUPP;
+
+ /* size and off must be aligned to SECTOR_SIZE for block devices */
+ ret = blkdev_panic_write(buf, off >> SECTOR_SHIFT,
+ size >> SECTOR_SHIFT);
+ /* try next zone */
+ if (ret == -ENOMSG)
+ return ret;
+ return ret ? -EIO : size;
+}
+
+static int __register_pstore_blk(struct pstore_blk_info *info)
+{
+ char bdev_name[BDEVNAME_SIZE];
+ struct block_device *bdev;
+ struct pstore_device_info dev;
+ struct bdev_info binfo;
+ void *holder = blkdev;
+ int ret = -ENODEV;
+
+ lockdep_assert_held(&pstore_blk_lock);
+
+ /* hold bdev exclusively */
+ memset(&binfo, 0, sizeof(binfo));
+ bdev = psblk_get_bdev(holder, &binfo);
+ if (IS_ERR(bdev)) {
+ pr_err("failed to open '%s'!\n", blkdev);
+ return PTR_ERR(bdev);
+ }
+
+ /* only allow driver matching the @blkdev */
+ if (!binfo.devt || (!best_effort &&
+ MAJOR(binfo.devt) != info->major)) {
+ pr_debug("invalid major %u (expect %u)\n",
+ info->major, MAJOR(binfo.devt));
+ ret = -ENODEV;
+ goto err_put_bdev;
+ }
+
+ /* psblk_bdev must be assigned before registering with pstore/blk */
+ psblk_bdev = bdev;
+ blkdev_panic_write = info->panic_write;
+
+ /* Copy back block device details. */
+ info->devt = binfo.devt;
+ info->nr_sects = binfo.nr_sects;
+ info->start_sect = binfo.start_sect;
+
+ memset(&dev, 0, sizeof(dev));
+ dev.total_size = info->nr_sects << SECTOR_SHIFT;
+ dev.flags = info->flags;
+ dev.read = psblk_generic_blk_read;
+ dev.write = psblk_generic_blk_write;
+ dev.erase = NULL;
+ dev.panic_write = info->panic_write ? psblk_blk_panic_write : NULL;
+
+ ret = __register_pstore_device(&dev);
+ if (ret)
+ goto err_put_bdev;
+
+ bdevname(bdev, bdev_name);
+ pr_info("attached %s%s\n", bdev_name,
+ info->panic_write ? "" : " (no dedicated panic_write!)");
+ return 0;
+
+err_put_bdev:
+ psblk_bdev = NULL;
+ blkdev_panic_write = NULL;
+ psblk_put_bdev(bdev, holder);
+ return ret;
+}
+
+/**
+ * register_pstore_blk() - register block device to pstore/blk
+ *
+ * @info: details on the desired block device interface
+ *
+ * Return:
+ * * 0 - OK
+ * Others - something went wrong.
+ */
+int register_pstore_blk(struct pstore_blk_info *info)
+{
+ int ret;
+
+ mutex_lock(&pstore_blk_lock);
+ ret = __register_pstore_blk(info);
+ mutex_unlock(&pstore_blk_lock);
+
+ return ret;
+}
+EXPORT_SYMBOL_GPL(register_pstore_blk);
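A block driver with its own panic-safe write path registers through
this interface; a sketch (mydisk_* and the major number are
hypothetical; unless best_effort is set, __register_pstore_blk() above
rejects a blkdev whose major does not match):

  #include <linux/pstore_blk.h>

  /*
   * Hypothetical: write @sects sectors from @buf starting at @start_sect,
   * bypassing the block layer so it is safe at panic time.
   */
  static int mydisk_panic_write(const char *buf, sector_t start_sect,
  				sector_t sects)
  {
  	return 0;
  }

  static int mydisk_attach_pstore(unsigned int my_major)
  {
  	struct pstore_blk_info info = {
  		.major		= my_major,
  		.flags		= PSTORE_FLAGS_DMESG,
  		.panic_write	= mydisk_panic_write,
  	};

  	/* On success, info.devt/nr_sects/start_sect are filled in. */
  	return register_pstore_blk(&info);
  }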
+
+static void __unregister_pstore_blk(unsigned int major)
+{
+ struct pstore_device_info dev = { .read = psblk_generic_blk_read };
+ void *holder = blkdev;
+
+ lockdep_assert_held(&pstore_blk_lock);
+ if (psblk_bdev && MAJOR(psblk_bdev->bd_dev) == major) {
+ __unregister_pstore_device(&dev);
+ psblk_put_bdev(psblk_bdev, holder);
+ blkdev_panic_write = NULL;
+ psblk_bdev = NULL;
+ }
+}
+
+/**
+ * unregister_pstore_blk() - unregister block device from pstore/blk
+ *
+ * @major: the major device number of device
+ */
+void unregister_pstore_blk(unsigned int major)
+{
+ mutex_lock(&pstore_blk_lock);
+ __unregister_pstore_blk(major);
+ mutex_unlock(&pstore_blk_lock);
+}
+EXPORT_SYMBOL_GPL(unregister_pstore_blk);
+
+/* Get configuration information for pstore/blk. */
+int pstore_blk_get_config(struct pstore_blk_config *info)
+{
+ strncpy(info->device, blkdev, 80);
+ info->max_reason = max_reason;
+ info->kmsg_size = check_size(kmsg_size, 4096);
+ info->pmsg_size = check_size(pmsg_size, 4096);
+ info->ftrace_size = check_size(ftrace_size, 4096);
+ info->console_size = check_size(console_size, 4096);
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(pstore_blk_get_config);
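Other code (the MTD backend in this series, for instance) can query the
effective settings. A sketch; per check_size() above, the returned
sizes are in bytes after alignment:

  #include <linux/printk.h>
  #include <linux/pstore_blk.h>

  static void show_pstore_blk_config(void)
  {
  	struct pstore_blk_config cfg = { };

  	if (pstore_blk_get_config(&cfg))
  		return;

  	/* cfg.device is the user's chosen device ("" if unset). */
  	pr_info("pstore/blk: dev '%s' kmsg %ld max_reason %d\n",
  		cfg.device, (long)cfg.kmsg_size, cfg.max_reason);
  }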
+
+static int __init pstore_blk_init(void)
+{
+ struct pstore_blk_info info = { };
+ int ret = 0;
+
+ mutex_lock(&pstore_blk_lock);
+ if (!pstore_zone_info && best_effort && blkdev[0])
+ ret = __register_pstore_blk(&info);
+ mutex_unlock(&pstore_blk_lock);
+
+ return ret;
+}
+late_initcall(pstore_blk_init);
+
+static void __exit pstore_blk_exit(void)
+{
+ mutex_lock(&pstore_blk_lock);
+ if (psblk_bdev)
+ __unregister_pstore_blk(MAJOR(psblk_bdev->bd_dev));
+ else {
+ struct pstore_device_info dev = { };
+
+ if (pstore_zone_info)
+ dev.read = pstore_zone_info->read;
+ __unregister_pstore_device(&dev);
+ }
+ mutex_unlock(&pstore_blk_lock);
+}
+module_exit(pstore_blk_exit);
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("WeiXiong Liao <liaoweixiong@allwinnertech.com>");
+MODULE_AUTHOR("Kees Cook <keescook@chromium.org>");
+MODULE_DESCRIPTION("pstore backend for block devices");
diff --git a/fs/pstore/ftrace.c b/fs/pstore/ftrace.c
index bfbfc2698070..5c0450701293 100644
--- a/fs/pstore/ftrace.c
+++ b/fs/pstore/ftrace.c
@@ -16,6 +16,7 @@
#include <linux/debugfs.h>
#include <linux/err.h>
#include <linux/cache.h>
+#include <linux/slab.h>
#include <asm/barrier.h>
#include "internal.h"
@@ -132,3 +133,56 @@ void pstore_unregister_ftrace(void)
debugfs_remove_recursive(pstore_ftrace_dir);
}
+
+ssize_t pstore_ftrace_combine_log(char **dest_log, size_t *dest_log_size,
+ const char *src_log, size_t src_log_size)
+{
+ size_t dest_size, src_size, total, dest_off, src_off;
+ size_t dest_idx = 0, src_idx = 0, merged_idx = 0;
+ void *merged_buf;
+ struct pstore_ftrace_record *drec, *srec, *mrec;
+ size_t record_size = sizeof(struct pstore_ftrace_record);
+
+ dest_off = *dest_log_size % record_size;
+ dest_size = *dest_log_size - dest_off;
+
+ src_off = src_log_size % record_size;
+ src_size = src_log_size - src_off;
+
+ total = dest_size + src_size;
+ merged_buf = kmalloc(total, GFP_KERNEL);
+ if (!merged_buf)
+ return -ENOMEM;
+
+ drec = (struct pstore_ftrace_record *)(*dest_log + dest_off);
+ srec = (struct pstore_ftrace_record *)(src_log + src_off);
+ mrec = (struct pstore_ftrace_record *)(merged_buf);
+
+ while (dest_size > 0 && src_size > 0) {
+ if (pstore_ftrace_read_timestamp(&drec[dest_idx]) <
+ pstore_ftrace_read_timestamp(&srec[src_idx])) {
+ mrec[merged_idx++] = drec[dest_idx++];
+ dest_size -= record_size;
+ } else {
+ mrec[merged_idx++] = srec[src_idx++];
+ src_size -= record_size;
+ }
+ }
+
+ while (dest_size > 0) {
+ mrec[merged_idx++] = drec[dest_idx++];
+ dest_size -= record_size;
+ }
+
+ while (src_size > 0) {
+ mrec[merged_idx++] = srec[src_idx++];
+ src_size -= record_size;
+ }
+
+ kfree(*dest_log);
+ *dest_log = merged_buf;
+ *dest_log_size = total;
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(pstore_ftrace_combine_log);
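The merge is timestamp-ordered and, on success, replaces the old
destination buffer with a freshly allocated one. A usage sketch in the
shape of the ramoops caller further down (the wrapper is hypothetical):

  static int fold_zone_logs(char **dest_log, size_t *dest_log_size,
  			  const char *src_log, size_t src_log_size)
  {
  	/*
  	 * On success, *dest_log points at a new kmalloc()ed buffer holding
  	 * both logs' records sorted by timestamp; the previous *dest_log
  	 * has been kfree()d and *dest_log_size updated.
  	 */
  	return pstore_ftrace_combine_log(dest_log, dest_log_size,
  					 src_log, src_log_size);
  }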
diff --git a/fs/pstore/inode.c b/fs/pstore/inode.c
index d99b5d39aa90..c331efe8de95 100644
--- a/fs/pstore/inode.c
+++ b/fs/pstore/inode.c
@@ -22,18 +22,21 @@
#include <linux/magic.h>
#include <linux/pstore.h>
#include <linux/slab.h>
-#include <linux/spinlock.h>
#include <linux/uaccess.h>
#include "internal.h"
#define PSTORE_NAMELEN 64
-static DEFINE_SPINLOCK(allpstore_lock);
-static LIST_HEAD(allpstore);
+static DEFINE_MUTEX(records_list_lock);
+static LIST_HEAD(records_list);
+
+static DEFINE_MUTEX(pstore_sb_lock);
+static struct super_block *pstore_sb;
struct pstore_private {
struct list_head list;
+ struct dentry *dentry;
struct pstore_record *record;
size_t total_size;
};
@@ -178,10 +181,22 @@ static int pstore_unlink(struct inode *dir, struct dentry *dentry)
{
struct pstore_private *p = d_inode(dentry)->i_private;
struct pstore_record *record = p->record;
+ int rc = 0;
if (!record->psi->erase)
return -EPERM;
+ /* Make sure we can't race while removing this file. */
+ mutex_lock(&records_list_lock);
+ if (!list_empty(&p->list))
+ list_del_init(&p->list);
+ else
+ rc = -ENOENT;
+ p->dentry = NULL;
+ mutex_unlock(&records_list_lock);
+ if (rc)
+ return rc;
+
mutex_lock(&record->psi->read_mutex);
record->psi->erase(record);
mutex_unlock(&record->psi->read_mutex);
@@ -192,15 +207,9 @@ static int pstore_unlink(struct inode *dir, struct dentry *dentry)
static void pstore_evict_inode(struct inode *inode)
{
struct pstore_private *p = inode->i_private;
- unsigned long flags;
clear_inode(inode);
- if (p) {
- spin_lock_irqsave(&allpstore_lock, flags);
- list_del(&p->list);
- spin_unlock_irqrestore(&allpstore_lock, flags);
- free_pstore_private(p);
- }
+ free_pstore_private(p);
}
static const struct inode_operations pstore_dir_inode_operations = {
@@ -278,11 +287,54 @@ static const struct super_operations pstore_ops = {
.show_options = pstore_show_options,
};
-static struct super_block *pstore_sb;
+static struct dentry *psinfo_lock_root(void)
+{
+ struct dentry *root;
-bool pstore_is_mounted(void)
+ mutex_lock(&pstore_sb_lock);
+ /*
+ * Having no backend is fine -- no records appear.
+ * Not being mounted is fine -- nothing to do.
+ */
+ if (!psinfo || !pstore_sb) {
+ mutex_unlock(&pstore_sb_lock);
+ return NULL;
+ }
+
+ root = pstore_sb->s_root;
+ inode_lock(d_inode(root));
+ mutex_unlock(&pstore_sb_lock);
+
+ return root;
+}
+
+int pstore_put_backend_records(struct pstore_info *psi)
{
- return pstore_sb != NULL;
+ struct pstore_private *pos, *tmp;
+ struct dentry *root;
+ int rc = 0;
+
+ root = psinfo_lock_root();
+ if (!root)
+ return 0;
+
+ mutex_lock(&records_list_lock);
+ list_for_each_entry_safe(pos, tmp, &records_list, list) {
+ if (pos->record->psi == psi) {
+ list_del_init(&pos->list);
+ rc = simple_unlink(d_inode(root), pos->dentry);
+ if (WARN_ON(rc))
+ break;
+ d_drop(pos->dentry);
+ dput(pos->dentry);
+ pos->dentry = NULL;
+ }
+ }
+ mutex_unlock(&records_list_lock);
+
+ inode_unlock(d_inode(root));
+
+ return rc;
}
/*
@@ -297,23 +349,20 @@ int pstore_mkfile(struct dentry *root, struct pstore_record *record)
int rc = 0;
char name[PSTORE_NAMELEN];
struct pstore_private *private, *pos;
- unsigned long flags;
size_t size = record->size + record->ecc_notice_size;
- WARN_ON(!inode_is_locked(d_inode(root)));
+ if (WARN_ON(!inode_is_locked(d_inode(root))))
+ return -EINVAL;
- spin_lock_irqsave(&allpstore_lock, flags);
- list_for_each_entry(pos, &allpstore, list) {
+ rc = -EEXIST;
+ /* Skip records that are already present in the filesystem. */
+ mutex_lock(&records_list_lock);
+ list_for_each_entry(pos, &records_list, list) {
if (pos->record->type == record->type &&
pos->record->id == record->id &&
- pos->record->psi == record->psi) {
- rc = -EEXIST;
- break;
- }
+ pos->record->psi == record->psi)
+ goto fail;
}
- spin_unlock_irqrestore(&allpstore_lock, flags);
- if (rc)
- return rc;
rc = -ENOMEM;
inode = pstore_get_inode(root->d_sb);
@@ -334,6 +383,7 @@ int pstore_mkfile(struct dentry *root, struct pstore_record *record)
if (!dentry)
goto fail_private;
+ private->dentry = dentry;
private->record = record;
inode->i_size = private->total_size = size;
inode->i_private = private;
@@ -343,9 +393,8 @@ int pstore_mkfile(struct dentry *root, struct pstore_record *record)
d_add(dentry, inode);
- spin_lock_irqsave(&allpstore_lock, flags);
- list_add(&private->list, &allpstore);
- spin_unlock_irqrestore(&allpstore_lock, flags);
+ list_add(&private->list, &records_list);
+ mutex_unlock(&records_list_lock);
return 0;
@@ -353,8 +402,8 @@ fail_private:
free_pstore_private(private);
fail_inode:
iput(inode);
-
fail:
+ mutex_unlock(&records_list_lock);
return rc;
}
@@ -366,16 +415,13 @@ fail:
*/
void pstore_get_records(int quiet)
{
- struct pstore_info *psi = psinfo;
struct dentry *root;
- if (!psi || !pstore_sb)
+ root = psinfo_lock_root();
+ if (!root)
return;
- root = pstore_sb->s_root;
-
- inode_lock(d_inode(root));
- pstore_get_backend_records(psi, root, quiet);
+ pstore_get_backend_records(psinfo, root, quiet);
inode_unlock(d_inode(root));
}
@@ -383,8 +429,6 @@ static int pstore_fill_super(struct super_block *sb, void *data, int silent)
{
struct inode *inode;
- pstore_sb = sb;
-
sb->s_maxbytes = MAX_LFS_FILESIZE;
sb->s_blocksize = PAGE_SIZE;
sb->s_blocksize_bits = PAGE_SHIFT;
@@ -405,6 +449,10 @@ static int pstore_fill_super(struct super_block *sb, void *data, int silent)
if (!sb->s_root)
return -ENOMEM;
+ mutex_lock(&pstore_sb_lock);
+ pstore_sb = sb;
+ mutex_unlock(&pstore_sb_lock);
+
pstore_get_records(0);
return 0;
@@ -418,8 +466,17 @@ static struct dentry *pstore_mount(struct file_system_type *fs_type,
static void pstore_kill_sb(struct super_block *sb)
{
+ mutex_lock(&pstore_sb_lock);
+ WARN_ON(pstore_sb != sb);
+
kill_litter_super(sb);
pstore_sb = NULL;
+
+ mutex_lock(&records_list_lock);
+ INIT_LIST_HEAD(&records_list);
+ mutex_unlock(&records_list_lock);
+
+ mutex_unlock(&pstore_sb_lock);
}
static struct file_system_type pstore_fs_type = {
diff --git a/fs/pstore/internal.h b/fs/pstore/internal.h
index 7062ea4bc57c..7fb219042f13 100644
--- a/fs/pstore/internal.h
+++ b/fs/pstore/internal.h
@@ -12,9 +12,18 @@ extern unsigned long kmsg_bytes;
#ifdef CONFIG_PSTORE_FTRACE
extern void pstore_register_ftrace(void);
extern void pstore_unregister_ftrace(void);
+ssize_t pstore_ftrace_combine_log(char **dest_log, size_t *dest_log_size,
+ const char *src_log, size_t src_log_size);
#else
static inline void pstore_register_ftrace(void) {}
static inline void pstore_unregister_ftrace(void) {}
+static inline ssize_t
+pstore_ftrace_combine_log(char **dest_log, size_t *dest_log_size,
+ const char *src_log, size_t src_log_size)
+{
+ *dest_log_size = 0;
+ return 0;
+}
#endif
#ifdef CONFIG_PSTORE_PMSG
@@ -31,9 +40,9 @@ extern void pstore_set_kmsg_bytes(int);
extern void pstore_get_records(int);
extern void pstore_get_backend_records(struct pstore_info *psi,
struct dentry *root, int quiet);
+extern int pstore_put_backend_records(struct pstore_info *psi);
extern int pstore_mkfile(struct dentry *root,
struct pstore_record *record);
-extern bool pstore_is_mounted(void);
extern void pstore_record_init(struct pstore_record *record,
struct pstore_info *psi);
diff --git a/fs/pstore/platform.c b/fs/pstore/platform.c
index 408277ee3cdb..a9e297eefdff 100644
--- a/fs/pstore/platform.c
+++ b/fs/pstore/platform.c
@@ -44,7 +44,7 @@ static int pstore_update_ms = -1;
module_param_named(update_ms, pstore_update_ms, int, 0600);
MODULE_PARM_DESC(update_ms, "milliseconds before pstore updates its content "
"(default is -1, which means runtime updates are disabled; "
- "enabling this option is not safe, it may lead to further "
+ "enabling this option may not be safe; it may lead to further "
"corruption on Oopses)");
/* Names should be in the same order as the enum pstore_type_id */
@@ -69,19 +69,25 @@ static void pstore_dowork(struct work_struct *);
static DECLARE_WORK(pstore_work, pstore_dowork);
/*
- * pstore_lock just protects "psinfo" during
- * calls to pstore_register()
+ * psinfo_lock protects "psinfo" during calls to
+ * pstore_register(), pstore_unregister(), and
+ * the filesystem mount/unmount routines.
*/
-static DEFINE_SPINLOCK(pstore_lock);
+static DEFINE_MUTEX(psinfo_lock);
struct pstore_info *psinfo;
static char *backend;
+module_param(backend, charp, 0444);
+MODULE_PARM_DESC(backend, "specific backend to use");
+
static char *compress =
#ifdef CONFIG_PSTORE_COMPRESS_DEFAULT
CONFIG_PSTORE_COMPRESS_DEFAULT;
#else
NULL;
#endif
+module_param(compress, charp, 0444);
+MODULE_PARM_DESC(compress, "compression to use");
/* Compression parameters */
static struct crypto_comp *tfm;
@@ -129,24 +135,12 @@ enum pstore_type_id pstore_name_to_type(const char *name)
}
EXPORT_SYMBOL_GPL(pstore_name_to_type);
-static const char *get_reason_str(enum kmsg_dump_reason reason)
+static void pstore_timer_kick(void)
{
- switch (reason) {
- case KMSG_DUMP_PANIC:
- return "Panic";
- case KMSG_DUMP_OOPS:
- return "Oops";
- case KMSG_DUMP_EMERG:
- return "Emergency";
- case KMSG_DUMP_RESTART:
- return "Restart";
- case KMSG_DUMP_HALT:
- return "Halt";
- case KMSG_DUMP_POWEROFF:
- return "Poweroff";
- default:
- return "Unknown";
- }
+ if (pstore_update_ms < 0)
+ return;
+
+ mod_timer(&pstore_timer, jiffies + msecs_to_jiffies(pstore_update_ms));
}
/*
@@ -393,7 +387,7 @@ static void pstore_dump(struct kmsg_dumper *dumper,
unsigned int part = 1;
int ret;
- why = get_reason_str(reason);
+ why = kmsg_dump_reason_str(reason);
if (down_trylock(&psinfo->buf_lock)) {
/* Failed to acquire lock: give up if we cannot wait. */
@@ -459,8 +453,10 @@ static void pstore_dump(struct kmsg_dumper *dumper,
}
ret = psinfo->write(&record);
- if (ret == 0 && reason == KMSG_DUMP_OOPS && pstore_is_mounted())
+ if (ret == 0 && reason == KMSG_DUMP_OOPS) {
pstore_new_entry = 1;
+ pstore_timer_kick();
+ }
total += record.size;
part++;
@@ -503,14 +499,20 @@ static void pstore_console_write(struct console *con, const char *s, unsigned c)
}
static struct console pstore_console = {
- .name = "pstore",
.write = pstore_console_write,
- .flags = CON_PRINTBUFFER | CON_ENABLED | CON_ANYTIME,
.index = -1,
};
static void pstore_register_console(void)
{
+ /* Show which backend is going to get console writes. */
+ strscpy(pstore_console.name, psinfo->name,
+ sizeof(pstore_console.name));
+ /*
+ * Always initialize flags here since prior unregister_console()
+ * calls may have changed settings (specifically CON_ENABLED).
+ */
+ pstore_console.flags = CON_PRINTBUFFER | CON_ENABLED | CON_ANYTIME;
register_console(&pstore_console);
}
@@ -555,8 +557,6 @@ out:
*/
int pstore_register(struct pstore_info *psi)
{
- struct module *owner = psi->owner;
-
if (backend && strcmp(backend, psi->name)) {
pr_warn("ignoring unexpected backend '%s'\n", psi->name);
return -EPERM;
@@ -576,11 +576,11 @@ int pstore_register(struct pstore_info *psi)
return -EINVAL;
}
- spin_lock(&pstore_lock);
+ mutex_lock(&psinfo_lock);
if (psinfo) {
pr_warn("backend '%s' already loaded: ignoring '%s'\n",
psinfo->name, psi->name);
- spin_unlock(&pstore_lock);
+ mutex_unlock(&psinfo_lock);
return -EBUSY;
}
@@ -589,21 +589,16 @@ int pstore_register(struct pstore_info *psi)
psinfo = psi;
mutex_init(&psinfo->read_mutex);
sema_init(&psinfo->buf_lock, 1);
- spin_unlock(&pstore_lock);
-
- if (owner && !try_module_get(owner)) {
- psinfo = NULL;
- return -EINVAL;
- }
if (psi->flags & PSTORE_FLAGS_DMESG)
allocate_buf_for_compression();
- if (pstore_is_mounted())
- pstore_get_records(0);
+ pstore_get_records(0);
- if (psi->flags & PSTORE_FLAGS_DMESG)
+ if (psi->flags & PSTORE_FLAGS_DMESG) {
+ pstore_dumper.max_reason = psinfo->max_reason;
pstore_register_kmsg();
+ }
if (psi->flags & PSTORE_FLAGS_CONSOLE)
pstore_register_console();
if (psi->flags & PSTORE_FLAGS_FTRACE)
@@ -612,33 +607,36 @@ int pstore_register(struct pstore_info *psi)
pstore_register_pmsg();
/* Start watching for new records, if desired. */
- if (pstore_update_ms >= 0) {
- pstore_timer.expires = jiffies +
- msecs_to_jiffies(pstore_update_ms);
- add_timer(&pstore_timer);
- }
+ pstore_timer_kick();
/*
* Update the module parameter backend, so it is visible
* through /sys/module/pstore/parameters/backend
*/
- backend = psi->name;
+ backend = kstrdup(psi->name, GFP_KERNEL);
pr_info("Registered %s as persistent store backend\n", psi->name);
- module_put(owner);
-
+ mutex_unlock(&psinfo_lock);
return 0;
}
EXPORT_SYMBOL_GPL(pstore_register);
void pstore_unregister(struct pstore_info *psi)
{
- /* Stop timer and make sure all work has finished. */
- pstore_update_ms = -1;
- del_timer_sync(&pstore_timer);
- flush_work(&pstore_work);
+ /* It's okay to unregister nothing. */
+ if (!psi)
+ return;
+
+ mutex_lock(&psinfo_lock);
+
+ /* Only one backend can be registered at a time. */
+ if (WARN_ON(psi != psinfo)) {
+ mutex_unlock(&psinfo_lock);
+ return;
+ }
+ /* Unregister all callbacks. */
if (psi->flags & PSTORE_FLAGS_PMSG)
pstore_unregister_pmsg();
if (psi->flags & PSTORE_FLAGS_FTRACE)
@@ -648,10 +646,19 @@ void pstore_unregister(struct pstore_info *psi)
if (psi->flags & PSTORE_FLAGS_DMESG)
pstore_unregister_kmsg();
+ /* Stop timer and make sure all work has finished. */
+ del_timer_sync(&pstore_timer);
+ flush_work(&pstore_work);
+
+ /* Remove all backend records from filesystem tree. */
+ pstore_put_backend_records(psi);
+
free_buf_for_compression();
psinfo = NULL;
+ kfree(backend);
backend = NULL;
+ mutex_unlock(&psinfo_lock);
}
EXPORT_SYMBOL_GPL(pstore_unregister);
@@ -788,9 +795,7 @@ static void pstore_timefunc(struct timer_list *unused)
schedule_work(&pstore_work);
}
- if (pstore_update_ms >= 0)
- mod_timer(&pstore_timer,
- jiffies + msecs_to_jiffies(pstore_update_ms));
+ pstore_timer_kick();
}
static void __init pstore_choose_compression(void)
@@ -835,11 +840,5 @@ static void __exit pstore_exit(void)
}
module_exit(pstore_exit)
-module_param(compress, charp, 0444);
-MODULE_PARM_DESC(compress, "Pstore compression to use");
-
-module_param(backend, charp, 0444);
-MODULE_PARM_DESC(backend, "Pstore backend to use");
-
MODULE_AUTHOR("Tony Luck <tony.luck@intel.com>");
MODULE_LICENSE("GPL");
diff --git a/fs/pstore/ram.c b/fs/pstore/ram.c
index 795622190c01..ca6d8a867285 100644
--- a/fs/pstore/ram.c
+++ b/fs/pstore/ram.c
@@ -21,6 +21,7 @@
#include <linux/pstore_ram.h>
#include <linux/of.h>
#include <linux/of_address.h>
+#include "internal.h"
#define RAMOOPS_KERNMSG_HDR "===="
#define MIN_MEM_SIZE 4096UL
@@ -53,22 +54,27 @@ MODULE_PARM_DESC(mem_size,
"size of reserved RAM used to store oops/panic logs");
static unsigned int mem_type;
-module_param(mem_type, uint, 0600);
+module_param(mem_type, uint, 0400);
MODULE_PARM_DESC(mem_type,
"set to 1 to try to use unbuffered memory (default 0)");
-static int dump_oops = 1;
-module_param(dump_oops, int, 0600);
-MODULE_PARM_DESC(dump_oops,
- "set to 1 to dump oopses, 0 to only dump panics (default 1)");
+static int ramoops_max_reason = -1;
+module_param_named(max_reason, ramoops_max_reason, int, 0400);
+MODULE_PARM_DESC(max_reason,
+ "maximum reason for kmsg dump (default 2: Oops and Panic) ");
static int ramoops_ecc;
-module_param_named(ecc, ramoops_ecc, int, 0600);
+module_param_named(ecc, ramoops_ecc, int, 0400);
MODULE_PARM_DESC(ramoops_ecc,
"if non-zero, the option enables ECC support and specifies "
"ECC buffer size in bytes (1 is a special value, means 16 "
"bytes ECC)");
+static int ramoops_dump_oops = -1;
+module_param_named(dump_oops, ramoops_dump_oops, int, 0400);
+MODULE_PARM_DESC(dump_oops,
+ "(deprecated: use max_reason instead) set to 1 to dump oopses & panics, 0 to only dump panics");
+
struct ramoops_context {
struct persistent_ram_zone **dprzs; /* Oops dump zones */
struct persistent_ram_zone *cprz; /* Console zone */
@@ -81,7 +87,6 @@ struct ramoops_context {
size_t console_size;
size_t ftrace_size;
size_t pmsg_size;
- int dump_oops;
u32 flags;
struct persistent_ram_ecc_info ecc_info;
unsigned int max_dump_cnt;
@@ -168,58 +173,6 @@ static bool prz_ok(struct persistent_ram_zone *prz)
persistent_ram_ecc_string(prz, NULL, 0));
}
-static ssize_t ftrace_log_combine(struct persistent_ram_zone *dest,
- struct persistent_ram_zone *src)
-{
- size_t dest_size, src_size, total, dest_off, src_off;
- size_t dest_idx = 0, src_idx = 0, merged_idx = 0;
- void *merged_buf;
- struct pstore_ftrace_record *drec, *srec, *mrec;
- size_t record_size = sizeof(struct pstore_ftrace_record);
-
- dest_off = dest->old_log_size % record_size;
- dest_size = dest->old_log_size - dest_off;
-
- src_off = src->old_log_size % record_size;
- src_size = src->old_log_size - src_off;
-
- total = dest_size + src_size;
- merged_buf = kmalloc(total, GFP_KERNEL);
- if (!merged_buf)
- return -ENOMEM;
-
- drec = (struct pstore_ftrace_record *)(dest->old_log + dest_off);
- srec = (struct pstore_ftrace_record *)(src->old_log + src_off);
- mrec = (struct pstore_ftrace_record *)(merged_buf);
-
- while (dest_size > 0 && src_size > 0) {
- if (pstore_ftrace_read_timestamp(&drec[dest_idx]) <
- pstore_ftrace_read_timestamp(&srec[src_idx])) {
- mrec[merged_idx++] = drec[dest_idx++];
- dest_size -= record_size;
- } else {
- mrec[merged_idx++] = srec[src_idx++];
- src_size -= record_size;
- }
- }
-
- while (dest_size > 0) {
- mrec[merged_idx++] = drec[dest_idx++];
- dest_size -= record_size;
- }
-
- while (src_size > 0) {
- mrec[merged_idx++] = srec[src_idx++];
- src_size -= record_size;
- }
-
- kfree(dest->old_log);
- dest->old_log = merged_buf;
- dest->old_log_size = total;
-
- return 0;
-}
-
static ssize_t ramoops_pstore_read(struct pstore_record *record)
{
ssize_t size = 0;
@@ -291,7 +244,12 @@ static ssize_t ramoops_pstore_read(struct pstore_record *record)
tmp_prz->corrected_bytes +=
prz_next->corrected_bytes;
tmp_prz->bad_blocks += prz_next->bad_blocks;
- size = ftrace_log_combine(tmp_prz, prz_next);
+
+ size = pstore_ftrace_combine_log(
+ &tmp_prz->old_log,
+ &tmp_prz->old_log_size,
+ prz_next->old_log,
+ prz_next->old_log_size);
if (size)
goto out;
}
@@ -382,16 +340,14 @@ static int notrace ramoops_pstore_write(struct pstore_record *record)
return -EINVAL;
/*
- * Out of the various dmesg dump types, ramoops is currently designed
- * to only store crash logs, rather than storing general kernel logs.
+ * We could filter on record->reason here if we wanted to (which
+ * would duplicate what happened before the "max_reason" setting
+ * was added), but that would defeat the purpose of a system
+ * changing printk.always_kmsg_dump, so instead log everything that
+ * the kmsg dumper sends us, since it should be doing the filtering
+ * based on the combination of printk.always_kmsg_dump and our
+ * requested "max_reason".
*/
- if (record->reason != KMSG_DUMP_OOPS &&
- record->reason != KMSG_DUMP_PANIC)
- return -EINVAL;
-
- /* Skip Oopes when configured to do so. */
- if (record->reason == KMSG_DUMP_OOPS && !cxt->dump_oops)
- return -EINVAL;
/*
* Explicitly only take the first part of any new crash.
@@ -644,19 +600,25 @@ static int ramoops_init_prz(const char *name,
return 0;
}
-static int ramoops_parse_dt_size(struct platform_device *pdev,
- const char *propname, u32 *value)
+/* Read a u32 from a dt property and make sure it's safe for an int. */
+static int ramoops_parse_dt_u32(struct platform_device *pdev,
+ const char *propname,
+ u32 default_value, u32 *value)
{
u32 val32 = 0;
int ret;
ret = of_property_read_u32(pdev->dev.of_node, propname, &val32);
- if (ret < 0 && ret != -EINVAL) {
+ if (ret == -EINVAL) {
+ /* field is missing, use default value. */
+ val32 = default_value;
+ } else if (ret < 0) {
dev_err(&pdev->dev, "failed to parse property %s: %d\n",
propname, ret);
return ret;
}
+ /* Sanity check our results. */
if (val32 > INT_MAX) {
dev_err(&pdev->dev, "%s %u > INT_MAX\n", propname, val32);
return -EOVERFLOW;
@@ -687,23 +649,32 @@ static int ramoops_parse_dt(struct platform_device *pdev,
pdata->mem_size = resource_size(res);
pdata->mem_address = res->start;
pdata->mem_type = of_property_read_bool(of_node, "unbuffered");
- pdata->dump_oops = !of_property_read_bool(of_node, "no-dump-oops");
-
-#define parse_size(name, field) { \
- ret = ramoops_parse_dt_size(pdev, name, &value); \
+ /*
+ * Setting "no-dump-oops" is deprecated and will be ignored if
+ * "max_reason" is also specified.
+ */
+ if (of_property_read_bool(of_node, "no-dump-oops"))
+ pdata->max_reason = KMSG_DUMP_PANIC;
+ else
+ pdata->max_reason = KMSG_DUMP_OOPS;
+
+#define parse_u32(name, field, default_value) { \
+ ret = ramoops_parse_dt_u32(pdev, name, default_value, \
+ &value); \
if (ret < 0) \
return ret; \
field = value; \
}
- parse_size("record-size", pdata->record_size);
- parse_size("console-size", pdata->console_size);
- parse_size("ftrace-size", pdata->ftrace_size);
- parse_size("pmsg-size", pdata->pmsg_size);
- parse_size("ecc-size", pdata->ecc_info.ecc_size);
- parse_size("flags", pdata->flags);
+ parse_u32("record-size", pdata->record_size, 0);
+ parse_u32("console-size", pdata->console_size, 0);
+ parse_u32("ftrace-size", pdata->ftrace_size, 0);
+ parse_u32("pmsg-size", pdata->pmsg_size, 0);
+ parse_u32("ecc-size", pdata->ecc_info.ecc_size, 0);
+ parse_u32("flags", pdata->flags, 0);
+ parse_u32("max-reason", pdata->max_reason, pdata->max_reason);
-#undef parse_size
+#undef parse_u32
/*
* Some old Chromebooks relied on the kernel setting the
@@ -785,7 +756,6 @@ static int ramoops_probe(struct platform_device *pdev)
cxt->console_size = pdata->console_size;
cxt->ftrace_size = pdata->ftrace_size;
cxt->pmsg_size = pdata->pmsg_size;
- cxt->dump_oops = pdata->dump_oops;
cxt->flags = pdata->flags;
cxt->ecc_info = pdata->ecc_info;
@@ -828,8 +798,10 @@ static int ramoops_probe(struct platform_device *pdev)
* the single region size is how to check.
*/
cxt->pstore.flags = 0;
- if (cxt->max_dump_cnt)
+ if (cxt->max_dump_cnt) {
cxt->pstore.flags |= PSTORE_FLAGS_DMESG;
+ cxt->pstore.max_reason = pdata->max_reason;
+ }
if (cxt->console_size)
cxt->pstore.flags |= PSTORE_FLAGS_CONSOLE;
if (cxt->max_ftrace_cnt)
@@ -865,7 +837,7 @@ static int ramoops_probe(struct platform_device *pdev)
mem_size = pdata->mem_size;
mem_address = pdata->mem_address;
record_size = pdata->record_size;
- dump_oops = pdata->dump_oops;
+ ramoops_max_reason = pdata->max_reason;
ramoops_console_size = pdata->console_size;
ramoops_pmsg_size = pdata->pmsg_size;
ramoops_ftrace_size = pdata->ftrace_size;
@@ -948,7 +920,16 @@ static void __init ramoops_register_dummy(void)
pdata.console_size = ramoops_console_size;
pdata.ftrace_size = ramoops_ftrace_size;
pdata.pmsg_size = ramoops_pmsg_size;
- pdata.dump_oops = dump_oops;
+ /* If "max_reason" is set, its value has priority over "dump_oops". */
+ if (ramoops_max_reason >= 0)
+ pdata.max_reason = ramoops_max_reason;
+ /* Otherwise, if "dump_oops" is set, parse it into "max_reason". */
+ else if (ramoops_dump_oops != -1)
+ pdata.max_reason = ramoops_dump_oops ? KMSG_DUMP_OOPS
+ : KMSG_DUMP_PANIC;
+ /* And if neither are explicitly set, use the default. */
+ else
+ pdata.max_reason = KMSG_DUMP_OOPS;
pdata.flags = RAMOOPS_FLAG_FTRACE_PER_CPU;
/*
diff --git a/fs/pstore/zone.c b/fs/pstore/zone.c
new file mode 100644
index 000000000000..819428dfa32f
--- /dev/null
+++ b/fs/pstore/zone.c
@@ -0,0 +1,1465 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Provide a pstore intermediate backend, organized into kernel memory
+ * allocated zones that are then mapped and flushed into a single
+ * contiguous region on a storage backend of some kind (block, mtd, etc).
+ */
+
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/slab.h>
+#include <linux/mount.h>
+#include <linux/printk.h>
+#include <linux/fs.h>
+#include <linux/pstore_zone.h>
+#include <linux/kdev_t.h>
+#include <linux/device.h>
+#include <linux/namei.h>
+#include <linux/fcntl.h>
+#include <linux/uio.h>
+#include <linux/writeback.h>
+#include "internal.h"
+
+/**
+ * struct psz_head - header of zone to flush to storage
+ *
+ * @sig: signature to indicate header (PSZ_SIG xor PSZONE-type value)
+ * @datalen: length of data in @data
+ * @start: offset into @data where the stored bytes begin
+ * @data: zone data.
+ */
+struct psz_buffer {
+#define PSZ_SIG (0x43474244) /* DBGC */
+ uint32_t sig;
+ atomic_t datalen;
+ atomic_t start;
+ uint8_t data[];
+};
+
+/**
+ * struct psz_kmsg_header - kmsg dump-specific header to flush to storage
+ *
+ * @magic: magic num for kmsg dump header
+ * @time: kmsg dump trigger time
+ * @compressed: whether compressed
+ * @counter: kmsg dump counter
+ * @reason: the kmsg dump reason (e.g. oops, panic, etc)
+ * @data: pointer to log data
+ *
+ * This is a sub-header for a kmsg dump, trailing after &psz_buffer.
+ */
+struct psz_kmsg_header {
+#define PSTORE_KMSG_HEADER_MAGIC 0x4dfc3ae5 /* Just a random number */
+ uint32_t magic;
+ struct timespec64 time;
+ bool compressed;
+ uint32_t counter;
+ enum kmsg_dump_reason reason;
+ uint8_t data[];
+};
+
+/**
+ * struct pstore_zone - single stored buffer
+ *
+ * @off: zone offset of storage
+ * @type: front-end type for this zone
+ * @name: front-end name for this zone
+ * @buffer: pointer to data buffer managed by this zone
+ * @oldbuf: pointer to old data buffer
+ * @buffer_size: bytes in @buffer->data
+ * @should_recover: whether this zone should recover from storage
+ * @dirty: whether the data in @buffer is dirty
+ *
+ * zone structure in memory.
+ */
+struct pstore_zone {
+ loff_t off;
+ const char *name;
+ enum pstore_type_id type;
+
+ struct psz_buffer *buffer;
+ struct psz_buffer *oldbuf;
+ size_t buffer_size;
+ bool should_recover;
+ atomic_t dirty;
+};
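Each zone therefore occupies its header plus data area on backing
storage; a hypothetical helper in the style of buffer_datalen() below,
matching the FLUSH_ALL write length in psz_zone_write():

  static inline size_t psz_zone_footprint(struct pstore_zone *zone)
  {
  	/* on-storage bytes: struct psz_buffer header + data area */
  	return sizeof(struct psz_buffer) + zone->buffer_size;
  }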
+
+/**
+ * struct psz_context - all about running state of pstore/zone
+ *
+ * @kpszs: kmsg dump storage zones
+ * @ppsz: pmsg storage zone
+ * @cpsz: console storage zone
+ * @fpszs: ftrace storage zones
+ * @kmsg_max_cnt: max count of @kpszs
+ * @kmsg_read_cnt: counter of total read kmsg dumps
+ * @kmsg_write_cnt: counter of total kmsg dump writes
+ * @pmsg_read_cnt: counter of total read pmsg zone
+ * @console_read_cnt: counter of total read console zone
+ * @ftrace_max_cnt: max count of @fpszs
+ * @ftrace_read_cnt: counter of total read ftrace zones
+ * @oops_counter: counter of oops dumps
+ * @panic_counter: counter of panic dumps
+ * @recovered: whether finished recovering data from storage
+ * @on_panic: whether panic is happening
+ * @pstore_zone_info_lock: lock to @pstore_zone_info
+ * @pstore_zone_info: information from backend
+ * @pstore: structure for pstore
+ */
+struct psz_context {
+ struct pstore_zone **kpszs;
+ struct pstore_zone *ppsz;
+ struct pstore_zone *cpsz;
+ struct pstore_zone **fpszs;
+ unsigned int kmsg_max_cnt;
+ unsigned int kmsg_read_cnt;
+ unsigned int kmsg_write_cnt;
+ unsigned int pmsg_read_cnt;
+ unsigned int console_read_cnt;
+ unsigned int ftrace_max_cnt;
+ unsigned int ftrace_read_cnt;
+ /*
+ * These counters should be calculated during recovery.
+ * They record the oops/panic counts accumulated across crashes, not per boot.
+ */
+ unsigned int oops_counter;
+ unsigned int panic_counter;
+ atomic_t recovered;
+ atomic_t on_panic;
+
+ /*
+ * pstore_zone_info_lock protects this entire structure during calls
+ * to register_pstore_zone()/unregister_pstore_zone().
+ */
+ struct mutex pstore_zone_info_lock;
+ struct pstore_zone_info *pstore_zone_info;
+ struct pstore_info pstore;
+};
+static struct psz_context pstore_zone_cxt;
+
+static void psz_flush_all_dirty_zones(struct work_struct *);
+static DECLARE_DELAYED_WORK(psz_cleaner, psz_flush_all_dirty_zones);
+
+/**
+ * enum psz_flush_mode - flush mode for psz_zone_write()
+ *
+ * @FLUSH_NONE: do not flush to storage but update data in memory
+ * @FLUSH_PART: just flush part of data including meta data to storage
+ * @FLUSH_META: just flush meta data of zone to storage
+ * @FLUSH_ALL: flush all of zone
+ */
+enum psz_flush_mode {
+ FLUSH_NONE = 0,
+ FLUSH_PART,
+ FLUSH_META,
+ FLUSH_ALL,
+};
+
+static inline int buffer_datalen(struct pstore_zone *zone)
+{
+ return atomic_read(&zone->buffer->datalen);
+}
+
+static inline int buffer_start(struct pstore_zone *zone)
+{
+ return atomic_read(&zone->buffer->start);
+}
+
+static inline bool is_on_panic(void)
+{
+ return atomic_read(&pstore_zone_cxt.on_panic);
+}
+
+static ssize_t psz_zone_read_buffer(struct pstore_zone *zone, char *buf,
+ size_t len, unsigned long off)
+{
+ if (!buf || !zone || !zone->buffer)
+ return -EINVAL;
+ if (off > zone->buffer_size)
+ return -EINVAL;
+ len = min_t(size_t, len, zone->buffer_size - off);
+ memcpy(buf, zone->buffer->data + off, len);
+ return len;
+}
+
+static int psz_zone_read_oldbuf(struct pstore_zone *zone, char *buf,
+ size_t len, unsigned long off)
+{
+ if (!buf || !zone || !zone->oldbuf)
+ return -EINVAL;
+ if (off > zone->buffer_size)
+ return -EINVAL;
+ len = min_t(size_t, len, zone->buffer_size - off);
+ memcpy(buf, zone->oldbuf->data + off, len);
+ return 0;
+}
+
+static int psz_zone_write(struct pstore_zone *zone,
+ enum psz_flush_mode flush_mode, const char *buf,
+ size_t len, unsigned long off)
+{
+ struct pstore_zone_info *info = pstore_zone_cxt.pstore_zone_info;
+ ssize_t wcnt = 0;
+ ssize_t (*writeop)(const char *buf, size_t bytes, loff_t pos);
+ size_t wlen;
+
+ if (off > zone->buffer_size)
+ return -EINVAL;
+
+ wlen = min_t(size_t, len, zone->buffer_size - off);
+ if (buf && wlen) {
+ memcpy(zone->buffer->data + off, buf, wlen);
+ atomic_set(&zone->buffer->datalen, wlen + off);
+ }
+
+ /* avoid damaging old records */
+ if (!is_on_panic() && !atomic_read(&pstore_zone_cxt.recovered))
+ goto dirty;
+
+ writeop = is_on_panic() ? info->panic_write : info->write;
+ if (!writeop)
+ goto dirty;
+
+ switch (flush_mode) {
+ case FLUSH_NONE:
+ if (unlikely(buf && wlen))
+ goto dirty;
+ return 0;
+ case FLUSH_PART:
+ wcnt = writeop((const char *)zone->buffer->data + off, wlen,
+ zone->off + sizeof(*zone->buffer) + off);
+ if (wcnt != wlen)
+ goto dirty;
+ fallthrough;
+ case FLUSH_META:
+ wlen = sizeof(struct psz_buffer);
+ wcnt = writeop((const char *)zone->buffer, wlen, zone->off);
+ if (wcnt != wlen)
+ goto dirty;
+ break;
+ case FLUSH_ALL:
+ wlen = zone->buffer_size + sizeof(*zone->buffer);
+ wcnt = writeop((const char *)zone->buffer, wlen, zone->off);
+ if (wcnt != wlen)
+ goto dirty;
+ break;
+ }
+
+ return 0;
+dirty:
+ /* no need to mark dirty if going to try next zone */
+ if (wcnt == -ENOMSG)
+ return -ENOMSG;
+ atomic_set(&zone->dirty, true);
+ /* flush dirty zones nicely */
+ if (wcnt == -EBUSY && !is_on_panic())
+ schedule_delayed_work(&psz_cleaner, msecs_to_jiffies(500));
+ return -EBUSY;
+}
+
+static int psz_flush_dirty_zone(struct pstore_zone *zone)
+{
+ int ret;
+
+ if (unlikely(!zone))
+ return -EINVAL;
+
+ if (unlikely(!atomic_read(&pstore_zone_cxt.recovered)))
+ return -EBUSY;
+
+ if (!atomic_xchg(&zone->dirty, false))
+ return 0;
+
+ ret = psz_zone_write(zone, FLUSH_ALL, NULL, 0, 0);
+ if (ret)
+ atomic_set(&zone->dirty, true);
+ return ret;
+}
+
+static int psz_flush_dirty_zones(struct pstore_zone **zones, unsigned int cnt)
+{
+ int i, ret;
+ struct pstore_zone *zone;
+
+ if (!zones)
+ return -EINVAL;
+
+ for (i = 0; i < cnt; i++) {
+ zone = zones[i];
+ if (!zone)
+ return -EINVAL;
+ ret = psz_flush_dirty_zone(zone);
+ if (ret)
+ return ret;
+ }
+ return 0;
+}
+
+static int psz_move_zone(struct pstore_zone *old, struct pstore_zone *new)
+{
+ const char *data = (const char *)old->buffer->data;
+ int ret;
+
+ ret = psz_zone_write(new, FLUSH_ALL, data, buffer_datalen(old), 0);
+ if (ret) {
+ atomic_set(&new->buffer->datalen, 0);
+ atomic_set(&new->dirty, false);
+ return ret;
+ }
+ atomic_set(&old->buffer->datalen, 0);
+ return 0;
+}
+
+static void psz_flush_all_dirty_zones(struct work_struct *work)
+{
+ struct psz_context *cxt = &pstore_zone_cxt;
+ int ret = 0;
+
+ if (cxt->ppsz)
+ ret |= psz_flush_dirty_zone(cxt->ppsz);
+ if (cxt->cpsz)
+ ret |= psz_flush_dirty_zone(cxt->cpsz);
+ if (cxt->kpszs)
+ ret |= psz_flush_dirty_zones(cxt->kpszs, cxt->kmsg_max_cnt);
+ if (cxt->fpszs)
+ ret |= psz_flush_dirty_zones(cxt->fpszs, cxt->ftrace_max_cnt);
+ if (ret && cxt->pstore_zone_info)
+ schedule_delayed_work(&psz_cleaner, msecs_to_jiffies(1000));
+}
+
+static int psz_kmsg_recover_data(struct psz_context *cxt)
+{
+ struct pstore_zone_info *info = cxt->pstore_zone_info;
+ struct pstore_zone *zone = NULL;
+ struct psz_buffer *buf;
+ unsigned long i;
+ ssize_t rcnt;
+
+ if (!info->read)
+ return -EINVAL;
+
+ for (i = 0; i < cxt->kmsg_max_cnt; i++) {
+ zone = cxt->kpszs[i];
+ if (unlikely(!zone))
+ return -EINVAL;
+ if (atomic_read(&zone->dirty)) {
+ unsigned int wcnt = cxt->kmsg_write_cnt;
+ struct pstore_zone *new = cxt->kpszs[wcnt];
+ int ret;
+
+ ret = psz_move_zone(zone, new);
+ if (ret) {
+ pr_err("move zone from %lu to %d failed\n",
+ i, wcnt);
+ return ret;
+ }
+ cxt->kmsg_write_cnt = (wcnt + 1) % cxt->kmsg_max_cnt;
+ }
+ if (!zone->should_recover)
+ continue;
+ buf = zone->buffer;
+ rcnt = info->read((char *)buf, zone->buffer_size + sizeof(*buf),
+ zone->off);
+ if (rcnt != zone->buffer_size + sizeof(*buf))
+ return (int)rcnt < 0 ? (int)rcnt : -EIO;
+ }
+ return 0;
+}
+
+static int psz_kmsg_recover_meta(struct psz_context *cxt)
+{
+ struct pstore_zone_info *info = cxt->pstore_zone_info;
+ struct pstore_zone *zone;
+ size_t rcnt, len;
+ struct psz_buffer *buf;
+ struct psz_kmsg_header *hdr;
+ struct timespec64 time = { };
+ unsigned long i;
+ /*
+ * Recovery may run during a panic, when we cannot allocate memory
+ * with kmalloc(), so use a local array on the stack instead.
+ */
+ char buffer_header[sizeof(*buf) + sizeof(*hdr)] = {0};
+
+ if (!info->read)
+ return -EINVAL;
+
+ len = sizeof(*buf) + sizeof(*hdr);
+ buf = (struct psz_buffer *)buffer_header;
+ for (i = 0; i < cxt->kmsg_max_cnt; i++) {
+ zone = cxt->kpszs[i];
+ if (unlikely(!zone))
+ return -EINVAL;
+
+ rcnt = info->read((char *)buf, len, zone->off);
+ if (rcnt == -ENOMSG) {
+ pr_debug("%s with id %lu may be broken, skip\n",
+ zone->name, i);
+ continue;
+ } else if (rcnt != len) {
+ pr_err("read %s with id %lu failed\n", zone->name, i);
+ return (int)rcnt < 0 ? (int)rcnt : -EIO;
+ }
+
+ if (buf->sig != zone->buffer->sig) {
+ pr_debug("no valid data in kmsg dump zone %lu\n", i);
+ continue;
+ }
+
+ if (zone->buffer_size < atomic_read(&buf->datalen)) {
+ pr_info("found overtop zone: %s: id %lu, off %lld, size %zu\n",
+ zone->name, i, zone->off,
+ zone->buffer_size);
+ continue;
+ }
+
+ hdr = (struct psz_kmsg_header *)buf->data;
+ if (hdr->magic != PSTORE_KMSG_HEADER_MAGIC) {
+ pr_info("found invalid zone: %s: id %lu, off %lld, size %zu\n",
+ zone->name, i, zone->off,
+ zone->buffer_size);
+ continue;
+ }
+
+ /*
+ * Track the newest zone: the one after it must be the oldest or an
+ * unused zone, because zones are written one after another in a
+ * circle.
+ */
+ if (hdr->time.tv_sec >= time.tv_sec) {
+ time.tv_sec = hdr->time.tv_sec;
+ cxt->kmsg_write_cnt = (i + 1) % cxt->kmsg_max_cnt;
+ }
+
+ if (hdr->reason == KMSG_DUMP_OOPS)
+ cxt->oops_counter =
+ max(cxt->oops_counter, hdr->counter);
+ else if (hdr->reason == KMSG_DUMP_PANIC)
+ cxt->panic_counter =
+ max(cxt->panic_counter, hdr->counter);
+
+ if (!atomic_read(&buf->datalen)) {
+ pr_debug("found erased zone: %s: id %lu, off %lld, size %zu, datalen %d\n",
+ zone->name, i, zone->off,
+ zone->buffer_size,
+ atomic_read(&buf->datalen));
+ continue;
+ }
+
+ if (!is_on_panic())
+ zone->should_recover = true;
+ pr_debug("found nice zone: %s: id %lu, off %lld, size %zu, datalen %d\n",
+ zone->name, i, zone->off,
+ zone->buffer_size, atomic_read(&buf->datalen));
+ }
+
+ return 0;
+}
+
+static int psz_kmsg_recover(struct psz_context *cxt)
+{
+ int ret;
+
+ if (!cxt->kpszs)
+ return 0;
+
+ ret = psz_kmsg_recover_meta(cxt);
+ if (ret)
+ goto recover_fail;
+
+ ret = psz_kmsg_recover_data(cxt);
+ if (ret)
+ goto recover_fail;
+
+ return 0;
+recover_fail:
+ pr_debug("psz_recover_kmsg failed\n");
+ return ret;
+}
+
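+/*
+ * Recover a single pmsg/console/ftrace zone: read its header from
+ * storage, validate it, then linearize the ring contents into a
+ * freshly allocated ->oldbuf so they can be served as the old record.
+ * On panic only the dirty state is flushed; nothing is read back.
+ */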
+static int psz_recover_zone(struct psz_context *cxt, struct pstore_zone *zone)
+{
+ struct pstore_zone_info *info = cxt->pstore_zone_info;
+ struct psz_buffer *oldbuf, tmpbuf;
+ int ret = 0;
+ char *buf;
+ ssize_t rcnt, len, start, off;
+
+ if (!zone || zone->oldbuf)
+ return 0;
+
+ if (is_on_panic()) {
+ /* save data as much as possible */
+ psz_flush_dirty_zone(zone);
+ return 0;
+ }
+
+ if (unlikely(!info->read))
+ return -EINVAL;
+
+ len = sizeof(struct psz_buffer);
+ rcnt = info->read((char *)&tmpbuf, len, zone->off);
+ if (rcnt != len) {
+ pr_debug("read zone %s failed\n", zone->name);
+ return (int)rcnt < 0 ? (int)rcnt : -EIO;
+ }
+
+ if (tmpbuf.sig != zone->buffer->sig) {
+ pr_debug("no valid data in zone %s\n", zone->name);
+ return 0;
+ }
+
+ if (zone->buffer_size < atomic_read(&tmpbuf.datalen) ||
+ zone->buffer_size < atomic_read(&tmpbuf.start)) {
+ pr_info("found overtop zone: %s: off %lld, size %zu\n",
+ zone->name, zone->off, zone->buffer_size);
+ /* just keep going */
+ return 0;
+ }
+
+ if (!atomic_read(&tmpbuf.datalen)) {
+ pr_debug("found erased zone: %s: off %lld, size %zu, datalen %d\n",
+ zone->name, zone->off, zone->buffer_size,
+ atomic_read(&tmpbuf.datalen));
+ return 0;
+ }
+
+ pr_debug("found nice zone: %s: off %lld, size %zu, datalen %d\n",
+ zone->name, zone->off, zone->buffer_size,
+ atomic_read(&tmpbuf.datalen));
+
+ len = atomic_read(&tmpbuf.datalen) + sizeof(*oldbuf);
+ oldbuf = kzalloc(len, GFP_KERNEL);
+ if (!oldbuf)
+ return -ENOMEM;
+
+ memcpy(oldbuf, &tmpbuf, sizeof(*oldbuf));
+ buf = (char *)oldbuf + sizeof(*oldbuf);
+ len = atomic_read(&oldbuf->datalen);
+ start = atomic_read(&oldbuf->start);
+ off = zone->off + sizeof(*oldbuf);
+
+ /* read the chunk from 'start' to the end of the data */
+ rcnt = info->read(buf, len - start, off + start);
+ if (rcnt != len - start) {
+ pr_err("read zone %s failed\n", zone->name);
+ ret = (int)rcnt < 0 ? (int)rcnt : -EIO;
+ goto free_oldbuf;
+ }
+
+ /* read the wrapped chunk that precedes 'start' */
+ rcnt = info->read(buf + len - start, start, off);
+ if (rcnt != start) {
+ pr_err("read zone %s failed\n", zone->name);
+ ret = (int)rcnt < 0 ? (int)rcnt : -EIO;
+ goto free_oldbuf;
+ }
+
+ zone->oldbuf = oldbuf;
+ psz_flush_dirty_zone(zone);
+ return 0;
+
+free_oldbuf:
+ kfree(oldbuf);
+ return ret;
+}
+
+static int psz_recover_zones(struct psz_context *cxt,
+ struct pstore_zone **zones, unsigned int cnt)
+{
+ int ret;
+ unsigned int i;
+ struct pstore_zone *zone;
+
+ if (!zones)
+ return 0;
+
+ for (i = 0; i < cnt; i++) {
+ zone = zones[i];
+ if (unlikely(!zone))
+ continue;
+ ret = psz_recover_zone(cxt, zone);
+ if (ret)
+ goto recover_fail;
+ }
+
+ return 0;
+recover_fail:
+ pr_debug("recover %s[%u] failed\n", zone->name, i);
+ return ret;
+}
+
+/**
+ * psz_recovery() - recover data from storage
+ * @cxt: the context of pstore/zone
+ *
+ * Recovery means reading the data back from storage after a reboot.
+ *
+ * Return: 0 on success, others on failure.
+ */
+static inline int psz_recovery(struct psz_context *cxt)
+{
+ int ret;
+
+ if (atomic_read(&cxt->recovered))
+ return 0;
+
+ ret = psz_kmsg_recover(cxt);
+ if (ret)
+ goto out;
+
+ ret = psz_recover_zone(cxt, cxt->ppsz);
+ if (ret)
+ goto out;
+
+ ret = psz_recover_zone(cxt, cxt->cpsz);
+ if (ret)
+ goto out;
+
+ ret = psz_recover_zones(cxt, cxt->fpszs, cxt->ftrace_max_cnt);
+
+out:
+ if (unlikely(ret))
+ pr_err("recover failed\n");
+ else {
+ pr_debug("recover end!\n");
+ atomic_set(&cxt->recovered, 1);
+ }
+ return ret;
+}
+
+static int psz_pstore_open(struct pstore_info *psi)
+{
+ struct psz_context *cxt = psi->data;
+
+ cxt->kmsg_read_cnt = 0;
+ cxt->pmsg_read_cnt = 0;
+ cxt->console_read_cnt = 0;
+ cxt->ftrace_read_cnt = 0;
+ return 0;
+}
+
+static inline bool psz_old_ok(struct pstore_zone *zone)
+{
+ if (zone && zone->oldbuf && atomic_read(&zone->oldbuf->datalen))
+ return true;
+ return false;
+}
+
+static inline bool psz_ok(struct pstore_zone *zone)
+{
+ if (zone && zone->buffer && buffer_datalen(zone))
+ return true;
+ return false;
+}
+
+static inline int psz_kmsg_erase(struct psz_context *cxt,
+ struct pstore_zone *zone, struct pstore_record *record)
+{
+ struct psz_buffer *buffer = zone->buffer;
+ struct psz_kmsg_header *hdr =
+ (struct psz_kmsg_header *)buffer->data;
+ size_t size;
+
+ if (unlikely(!psz_ok(zone)))
+ return 0;
+
+ /* this zone is already updated, no need to erase */
+ if (record->count != hdr->counter)
+ return 0;
+
+ size = buffer_datalen(zone) + sizeof(*zone->buffer);
+ atomic_set(&zone->buffer->datalen, 0);
+ if (cxt->pstore_zone_info->erase)
+ return cxt->pstore_zone_info->erase(size, zone->off);
+ else
+ return psz_zone_write(zone, FLUSH_META, NULL, 0, 0);
+}
+
+static inline int psz_record_erase(struct psz_context *cxt,
+ struct pstore_zone *zone)
+{
+ if (unlikely(!psz_old_ok(zone)))
+ return 0;
+
+ kfree(zone->oldbuf);
+ zone->oldbuf = NULL;
+ /*
+ * If there is new data in the zone buffer, the old data is already
+ * invalid, so there is no need to flush zeroes (an erase) to the
+ * block device: flushing the dirty zone is enough.
+ */
+ if (!buffer_datalen(zone))
+ return psz_zone_write(zone, FLUSH_META, NULL, 0, 0);
+ psz_flush_dirty_zone(zone);
+ return 0;
+}
+
+static int psz_pstore_erase(struct pstore_record *record)
+{
+ struct psz_context *cxt = record->psi->data;
+
+ switch (record->type) {
+ case PSTORE_TYPE_DMESG:
+ if (record->id >= cxt->kmsg_max_cnt)
+ return -EINVAL;
+ return psz_kmsg_erase(cxt, cxt->kpszs[record->id], record);
+ case PSTORE_TYPE_PMSG:
+ return psz_record_erase(cxt, cxt->ppsz);
+ case PSTORE_TYPE_CONSOLE:
+ return psz_record_erase(cxt, cxt->cpsz);
+ case PSTORE_TYPE_FTRACE:
+ if (record->id >= cxt->ftrace_max_cnt)
+ return -EINVAL;
+ return psz_record_erase(cxt, cxt->fpszs[record->id]);
+ default:
+ return -EINVAL;
+ }
+}
+
+static void psz_write_kmsg_hdr(struct pstore_zone *zone,
+ struct pstore_record *record)
+{
+ struct psz_context *cxt = record->psi->data;
+ struct psz_buffer *buffer = zone->buffer;
+ struct psz_kmsg_header *hdr =
+ (struct psz_kmsg_header *)buffer->data;
+
+ hdr->magic = PSTORE_KMSG_HEADER_MAGIC;
+ hdr->compressed = record->compressed;
+ hdr->time.tv_sec = record->time.tv_sec;
+ hdr->time.tv_nsec = record->time.tv_nsec;
+ hdr->reason = record->reason;
+ if (hdr->reason == KMSG_DUMP_OOPS)
+ hdr->counter = ++cxt->oops_counter;
+ else if (hdr->reason == KMSG_DUMP_PANIC)
+ hdr->counter = ++cxt->panic_counter;
+ else
+ hdr->counter = 0;
+}
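+
+/*
+ * Layout of a kmsg dump zone as written out below (a sketch; the exact
+ * struct definitions live earlier in this file):
+ *
+ * struct psz_buffer (sig, datalen, start)
+ * struct psz_kmsg_header (magic, compressed, time, reason, counter)
+ * dump payload (up to buffer_size - sizeof(struct psz_kmsg_header))
+ */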
+
+/*
+ * In case a zone is broken, which may happen on MTD devices, try each
+ * zone in turn, starting at cxt->kmsg_write_cnt.
+ */
+static inline int notrace psz_kmsg_write_record(struct psz_context *cxt,
+ struct pstore_record *record)
+{
+ size_t size, hlen;
+ struct pstore_zone *zone;
+ unsigned int i;
+
+ for (i = 0; i < cxt->kmsg_max_cnt; i++) {
+ unsigned int zonenum, len;
+ int ret;
+
+ zonenum = (cxt->kmsg_write_cnt + i) % cxt->kmsg_max_cnt;
+ zone = cxt->kpszs[zonenum];
+ if (unlikely(!zone))
+ return -ENOSPC;
+
+ /* avoid destroying old data, allocate a new one */
+ len = zone->buffer_size + sizeof(*zone->buffer);
+ zone->oldbuf = zone->buffer;
+ zone->buffer = kzalloc(len, GFP_KERNEL);
+ if (!zone->buffer) {
+ zone->buffer = zone->oldbuf;
+ return -ENOMEM;
+ }
+ zone->buffer->sig = zone->oldbuf->sig;
+
+ pr_debug("write %s to zone id %d\n", zone->name, zonenum);
+ psz_write_kmsg_hdr(zone, record);
+ hlen = sizeof(struct psz_kmsg_header);
+ size = min_t(size_t, record->size, zone->buffer_size - hlen);
+ ret = psz_zone_write(zone, FLUSH_ALL, record->buf, size, hlen);
+ if (likely(ret != -ENOMSG)) {
+ cxt->kmsg_write_cnt = zonenum + 1;
+ cxt->kmsg_write_cnt %= cxt->kmsg_max_cnt;
+ /* no need to try next zone, free last zone buffer */
+ kfree(zone->oldbuf);
+ zone->oldbuf = NULL;
+ return ret;
+ }
+
+ pr_debug("zone %u may be broken, try next dmesg zone\n",
+ zonenum);
+ kfree(zone->buffer);
+ zone->buffer = zone->oldbuf;
+ zone->oldbuf = NULL;
+ }
+
+ return -EBUSY;
+}
+
+static int notrace psz_kmsg_write(struct psz_context *cxt,
+ struct pstore_record *record)
+{
+ int ret;
+
+ /*
+ * Explicitly only take the first part of any new crash.
+ * If our buffer is larger than kmsg_bytes, this can never happen,
+ * and if our buffer is smaller than kmsg_bytes, we don't want the
+ * report split across multiple records.
+ */
+ if (record->part != 1)
+ return -ENOSPC;
+
+ if (!cxt->kpszs)
+ return -ENOSPC;
+
+ ret = psz_kmsg_write_record(cxt, record);
+ if (!ret && is_on_panic()) {
+ /* on panic, make sure all data is flushed to storage */
+ pr_debug("try to flush other dirty zones\n");
+ psz_flush_all_dirty_zones(NULL);
+ }
+
+ /* always return 0: the record has already been handled in the buffer */
+ return 0;
+}
+
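+/*
+ * Append a record to a pmsg/console zone, treating the zone as a ring
+ * buffer. For example, with buffer_size = 8, start = 6 and a 4-byte
+ * record: 2 bytes are written at offset 6, the write wraps, the other
+ * 2 bytes land at offset 0, start becomes 2, and datalen is pinned to
+ * 8 because the zone is now full.
+ */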
+static int notrace psz_record_write(struct pstore_zone *zone,
+ struct pstore_record *record)
+{
+ size_t start, rem;
+ bool is_full_data = false;
+ char *buf;
+ int cnt;
+
+ if (!zone || !record)
+ return -ENOSPC;
+
+ if (atomic_read(&zone->buffer->datalen) >= zone->buffer_size)
+ is_full_data = true;
+
+ cnt = record->size;
+ buf = record->buf;
+ if (unlikely(cnt > zone->buffer_size)) {
+ buf += cnt - zone->buffer_size;
+ cnt = zone->buffer_size;
+ }
+
+ start = buffer_start(zone);
+ rem = zone->buffer_size - start;
+ if (unlikely(rem < cnt)) {
+ psz_zone_write(zone, FLUSH_PART, buf, rem, start);
+ buf += rem;
+ cnt -= rem;
+ start = 0;
+ is_full_data = true;
+ }
+
+ atomic_set(&zone->buffer->start, cnt + start);
+ psz_zone_write(zone, FLUSH_PART, buf, cnt, start);
+
+ /*
+ * psz_zone_write() sets datalen to start + cnt, which is correct as
+ * long as the actual data length is less than the buffer size. Once
+ * the data length exceeds the buffer size, writes wrap back to the
+ * beginning of the zone and buffer->datalen would end up wrong, so
+ * pin datalen to the buffer size whenever the zone is full.
+ */
+ if (is_full_data) {
+ atomic_set(&zone->buffer->datalen, zone->buffer_size);
+ psz_zone_write(zone, FLUSH_META, NULL, 0, 0);
+ }
+ return 0;
+}
+
+static int notrace psz_pstore_write(struct pstore_record *record)
+{
+ struct psz_context *cxt = record->psi->data;
+
+ if (record->type == PSTORE_TYPE_DMESG &&
+ record->reason == KMSG_DUMP_PANIC)
+ atomic_set(&cxt->on_panic, 1);
+
+ /*
+ * When on panic, write nothing except panic records. This avoids the
+ * case where panic_write() prints a log line that wakes up the
+ * console backend.
+ */
+ if (is_on_panic() && record->type != PSTORE_TYPE_DMESG)
+ return -EBUSY;
+
+ switch (record->type) {
+ case PSTORE_TYPE_DMESG:
+ return psz_kmsg_write(cxt, record);
+ case PSTORE_TYPE_CONSOLE:
+ return psz_record_write(cxt->cpsz, record);
+ case PSTORE_TYPE_PMSG:
+ return psz_record_write(cxt->ppsz, record);
+ case PSTORE_TYPE_FTRACE: {
+ int zonenum = smp_processor_id();
+
+ if (!cxt->fpszs)
+ return -ENOSPC;
+ return psz_record_write(cxt->fpszs[zonenum], record);
+ }
+ default:
+ return -EINVAL;
+ }
+}
+
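+/*
+ * Reads walk the zones in a fixed order: all kmsg dump zones first,
+ * then the per-CPU ftrace zones (combined by psz_ftrace_read()), then
+ * pmsg, then console.
+ */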
+static struct pstore_zone *psz_read_next_zone(struct psz_context *cxt)
+{
+ struct pstore_zone *zone = NULL;
+
+ while (cxt->kmsg_read_cnt < cxt->kmsg_max_cnt) {
+ zone = cxt->kpszs[cxt->kmsg_read_cnt++];
+ if (psz_ok(zone))
+ return zone;
+ }
+
+ if (cxt->ftrace_read_cnt < cxt->ftrace_max_cnt)
+ /*
+ * No need to check psz_old_ok() here: psz_ftrace_read() does that
+ * while combining the zones, and it must traverse all zones anyway
+ * since some may hold no data.
+ */
+ return cxt->fpszs[cxt->ftrace_read_cnt++];
+
+ if (cxt->pmsg_read_cnt == 0) {
+ cxt->pmsg_read_cnt++;
+ zone = cxt->ppsz;
+ if (psz_old_ok(zone))
+ return zone;
+ }
+
+ if (cxt->console_read_cnt == 0) {
+ cxt->console_read_cnt++;
+ zone = cxt->cpsz;
+ if (psz_old_ok(zone))
+ return zone;
+ }
+
+ return NULL;
+}
+
+static int psz_kmsg_read_hdr(struct pstore_zone *zone,
+ struct pstore_record *record)
+{
+ struct psz_buffer *buffer = zone->buffer;
+ struct psz_kmsg_header *hdr =
+ (struct psz_kmsg_header *)buffer->data;
+
+ if (hdr->magic != PSTORE_KMSG_HEADER_MAGIC)
+ return -EINVAL;
+ record->compressed = hdr->compressed;
+ record->time.tv_sec = hdr->time.tv_sec;
+ record->time.tv_nsec = hdr->time.tv_nsec;
+ record->reason = hdr->reason;
+ record->count = hdr->counter;
+ return 0;
+}
+
+static ssize_t psz_kmsg_read(struct pstore_zone *zone,
+ struct pstore_record *record)
+{
+ ssize_t size, hlen = 0;
+
+ size = buffer_datalen(zone);
+ /* Clear and skip this kmsg dump record if it has no valid header */
+ if (psz_kmsg_read_hdr(zone, record)) {
+ atomic_set(&zone->buffer->datalen, 0);
+ atomic_set(&zone->dirty, 0);
+ return -ENOMSG;
+ }
+ size -= sizeof(struct psz_kmsg_header);
+
+ if (!record->compressed) {
+ char *buf = kasprintf(GFP_KERNEL, "%s: Total %d times\n",
+ kmsg_dump_reason_str(record->reason),
+ record->count);
+ if (!buf)
+ return -ENOMEM;
+ hlen = strlen(buf);
+ record->buf = krealloc(buf, hlen + size, GFP_KERNEL);
+ if (!record->buf) {
+ kfree(buf);
+ return -ENOMEM;
+ }
+ } else {
+ record->buf = kmalloc(size, GFP_KERNEL);
+ if (!record->buf)
+ return -ENOMEM;
+ }
+
+ size = psz_zone_read_buffer(zone, record->buf + hlen, size,
+ sizeof(struct psz_kmsg_header));
+ if (unlikely(size < 0)) {
+ kfree(record->buf);
+ return -ENOMSG;
+ }
+
+ return size + hlen;
+}
+
+/* try to combine all ftrace zones */
+static ssize_t psz_ftrace_read(struct pstore_zone *zone,
+ struct pstore_record *record)
+{
+ struct psz_context *cxt;
+ struct psz_buffer *buf;
+ int ret;
+
+ if (!zone || !record)
+ return -ENOSPC;
+
+ if (!psz_old_ok(zone))
+ goto out;
+
+ buf = (struct psz_buffer *)zone->oldbuf;
+ if (!buf)
+ return -ENOMSG;
+
+ ret = pstore_ftrace_combine_log(&record->buf, &record->size,
+ (char *)buf->data, atomic_read(&buf->datalen));
+ if (unlikely(ret))
+ return ret;
+
+out:
+ cxt = record->psi->data;
+ if (cxt->ftrace_read_cnt < cxt->ftrace_max_cnt)
+ /* then, read next ftrace zone */
+ return -ENOMSG;
+ record->id = 0;
+ return record->size ? record->size : -ENOMSG;
+}
+
+static ssize_t psz_record_read(struct pstore_zone *zone,
+ struct pstore_record *record)
+{
+ size_t len;
+ struct psz_buffer *buf;
+
+ if (!zone || !record)
+ return -ENOSPC;
+
+ buf = (struct psz_buffer *)zone->oldbuf;
+ if (!buf)
+ return -ENOMSG;
+
+ len = atomic_read(&buf->datalen);
+ record->buf = kmalloc(len, GFP_KERNEL);
+ if (!record->buf)
+ return -ENOMEM;
+
+ if (unlikely(psz_zone_read_oldbuf(zone, record->buf, len, 0))) {
+ kfree(record->buf);
+ return -ENOMSG;
+ }
+
+ return len;
+}
+
+static ssize_t psz_pstore_read(struct pstore_record *record)
+{
+ struct psz_context *cxt = record->psi->data;
+ ssize_t (*readop)(struct pstore_zone *zone,
+ struct pstore_record *record);
+ struct pstore_zone *zone;
+ ssize_t ret;
+
+ /* before read, we must recover from storage */
+ ret = psz_recovery(cxt);
+ if (ret)
+ return ret;
+
+next_zone:
+ zone = psz_read_next_zone(cxt);
+ if (!zone)
+ return 0;
+
+ record->type = zone->type;
+ switch (record->type) {
+ case PSTORE_TYPE_DMESG:
+ readop = psz_kmsg_read;
+ record->id = cxt->kmsg_read_cnt - 1;
+ break;
+ case PSTORE_TYPE_FTRACE:
+ readop = psz_ftrace_read;
+ break;
+ case PSTORE_TYPE_CONSOLE:
+ fallthrough;
+ case PSTORE_TYPE_PMSG:
+ readop = psz_record_read;
+ break;
+ default:
+ goto next_zone;
+ }
+
+ ret = readop(zone, record);
+ if (ret == -ENOMSG)
+ goto next_zone;
+ return ret;
+}
+
+static struct psz_context pstore_zone_cxt = {
+ .pstore_zone_info_lock =
+ __MUTEX_INITIALIZER(pstore_zone_cxt.pstore_zone_info_lock),
+ .recovered = ATOMIC_INIT(0),
+ .on_panic = ATOMIC_INIT(0),
+ .pstore = {
+ .owner = THIS_MODULE,
+ .open = psz_pstore_open,
+ .read = psz_pstore_read,
+ .write = psz_pstore_write,
+ .erase = psz_pstore_erase,
+ },
+};
+
+static void psz_free_zone(struct pstore_zone **pszone)
+{
+ struct pstore_zone *zone = *pszone;
+
+ if (!zone)
+ return;
+
+ kfree(zone->buffer);
+ kfree(zone);
+ *pszone = NULL;
+}
+
+static void psz_free_zones(struct pstore_zone ***pszones, unsigned int *cnt)
+{
+ struct pstore_zone **zones = *pszones;
+
+ if (!zones)
+ return;
+
+ while (*cnt > 0) {
+ (*cnt)--;
+ psz_free_zone(&(zones[*cnt]));
+ }
+ kfree(zones);
+ *pszones = NULL;
+}
+
+static void psz_free_all_zones(struct psz_context *cxt)
+{
+ if (cxt->kpszs)
+ psz_free_zones(&cxt->kpszs, &cxt->kmsg_max_cnt);
+ if (cxt->ppsz)
+ psz_free_zone(&cxt->ppsz);
+ if (cxt->cpsz)
+ psz_free_zone(&cxt->cpsz);
+ if (cxt->fpszs)
+ psz_free_zones(&cxt->fpszs, &cxt->ftrace_max_cnt);
+}
+
+static struct pstore_zone *psz_init_zone(enum pstore_type_id type,
+ loff_t *off, size_t size)
+{
+ struct pstore_zone_info *info = pstore_zone_cxt.pstore_zone_info;
+ struct pstore_zone *zone;
+ const char *name = pstore_type_to_name(type);
+
+ if (!size)
+ return NULL;
+
+ if (*off + size > info->total_size) {
+ pr_err("no room for %s (0x%zx@0x%llx over 0x%lx)\n",
+ name, size, *off, info->total_size);
+ return ERR_PTR(-ENOMEM);
+ }
+
+ zone = kzalloc(sizeof(struct pstore_zone), GFP_KERNEL);
+ if (!zone)
+ return ERR_PTR(-ENOMEM);
+
+ zone->buffer = kmalloc(size, GFP_KERNEL);
+ if (!zone->buffer) {
+ kfree(zone);
+ return ERR_PTR(-ENOMEM);
+ }
+ memset(zone->buffer, 0xFF, size);
+ zone->off = *off;
+ zone->name = name;
+ zone->type = type;
+ zone->buffer_size = size - sizeof(struct psz_buffer);
+ zone->buffer->sig = type ^ PSZ_SIG;
+ zone->oldbuf = NULL;
+ atomic_set(&zone->dirty, 0);
+ atomic_set(&zone->buffer->datalen, 0);
+ atomic_set(&zone->buffer->start, 0);
+
+ *off += size;
+
+ pr_debug("pszone %s: off 0x%llx, %zu header, %zu data\n", zone->name,
+ zone->off, sizeof(*zone->buffer), zone->buffer_size);
+ return zone;
+}
+
+static struct pstore_zone **psz_init_zones(enum pstore_type_id type,
+ loff_t *off, size_t total_size, ssize_t record_size,
+ unsigned int *cnt)
+{
+ struct pstore_zone_info *info = pstore_zone_cxt.pstore_zone_info;
+ struct pstore_zone **zones, *zone;
+ const char *name = pstore_type_to_name(type);
+ int c, i;
+
+ *cnt = 0;
+ if (!total_size || !record_size)
+ return NULL;
+
+ if (*off + total_size > info->total_size) {
+ pr_err("no room for zones %s (0x%zx@0x%llx over 0x%lx)\n",
+ name, total_size, *off, info->total_size);
+ return ERR_PTR(-ENOMEM);
+ }
+
+ c = total_size / record_size;
+ zones = kcalloc(c, sizeof(*zones), GFP_KERNEL);
+ if (!zones) {
+ pr_err("allocate for zones %s failed\n", name);
+ return ERR_PTR(-ENOMEM);
+ }
+
+ for (i = 0; i < c; i++) {
+ zone = psz_init_zone(type, off, record_size);
+ if (!zone || IS_ERR(zone)) {
+ pr_err("initialize zones %s failed\n", name);
+ psz_free_zones(&zones, &i);
+ return (void *)zone;
+ }
+ zones[i] = zone;
+ }
+
+ *cnt = c;
+ return zones;
+}
+
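+/*
+ * Carve the backend's total_size into zones, in allocation order:
+ *
+ * [ pmsg ][ console ][ per-CPU ftrace zones ][ kmsg dump zones ... ]
+ *
+ * The kmsg dump zones take whatever space remains, split into
+ * kmsg_size chunks.
+ */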
+static int psz_alloc_zones(struct psz_context *cxt)
+{
+ struct pstore_zone_info *info = cxt->pstore_zone_info;
+ loff_t off = 0;
+ int err;
+ size_t off_size = 0;
+
+ off_size += info->pmsg_size;
+ cxt->ppsz = psz_init_zone(PSTORE_TYPE_PMSG, &off, info->pmsg_size);
+ if (IS_ERR(cxt->ppsz)) {
+ err = PTR_ERR(cxt->ppsz);
+ cxt->ppsz = NULL;
+ goto free_out;
+ }
+
+ off_size += info->console_size;
+ cxt->cpsz = psz_init_zone(PSTORE_TYPE_CONSOLE, &off,
+ info->console_size);
+ if (IS_ERR(cxt->cpsz)) {
+ err = PTR_ERR(cxt->cpsz);
+ cxt->cpsz = NULL;
+ goto free_out;
+ }
+
+ off_size += info->ftrace_size;
+ cxt->fpszs = psz_init_zones(PSTORE_TYPE_FTRACE, &off,
+ info->ftrace_size,
+ info->ftrace_size / nr_cpu_ids,
+ &cxt->ftrace_max_cnt);
+ if (IS_ERR(cxt->fpszs)) {
+ err = PTR_ERR(cxt->fpszs);
+ cxt->fpszs = NULL;
+ goto free_out;
+ }
+
+ cxt->kpszs = psz_init_zones(PSTORE_TYPE_DMESG, &off,
+ info->total_size - off_size,
+ info->kmsg_size, &cxt->kmsg_max_cnt);
+ if (IS_ERR(cxt->kpszs)) {
+ err = PTR_ERR(cxt->kpszs);
+ cxt->kpszs = NULL;
+ goto free_out;
+ }
+
+ return 0;
+free_out:
+ psz_free_all_zones(cxt);
+ return err;
+}
+
+/**
+ * register_pstore_zone() - register a backend with pstore/zone
+ *
+ * @info: backend driver information. See &struct pstore_zone_info.
+ *
+ * Only one backend can be registered at a time.
+ *
+ * Return: 0 on success, others on failure.
+ */
+int register_pstore_zone(struct pstore_zone_info *info)
+{
+ int err = -EINVAL;
+ struct psz_context *cxt = &pstore_zone_cxt;
+
+ if (info->total_size < 4096) {
+ pr_warn("total_size must be >= 4096\n");
+ return -EINVAL;
+ }
+
+ if (!info->kmsg_size && !info->pmsg_size && !info->console_size &&
+ !info->ftrace_size) {
+ pr_warn("at least one record size must be non-zero\n");
+ return -EINVAL;
+ }
+
+ if (!info->name || !info->name[0])
+ return -EINVAL;
+
+#define check_size(name, size) { \
+ if (info->name > 0 && info->name < (size)) { \
+ pr_err(#name " must be over %d\n", (size)); \
+ return -EINVAL; \
+ } \
+ if (info->name & (size - 1)) { \
+ pr_err(#name " must be a multiple of %d\n", \
+ (size)); \
+ return -EINVAL; \
+ } \
+ }
+
+ check_size(total_size, 4096);
+ check_size(kmsg_size, SECTOR_SIZE);
+ check_size(pmsg_size, SECTOR_SIZE);
+ check_size(console_size, SECTOR_SIZE);
+ check_size(ftrace_size, SECTOR_SIZE);
+
+#undef check_size
+
+ /*
+ * Both the @read and @write callbacks must be provided: without
+ * @read, mounting pstore may fail; without @write, pstore cannot
+ * remove record files.
+ */
+ if (!info->read || !info->write) {
+ pr_err("no valid general read/write interface\n");
+ return -EINVAL;
+ }
+
+ mutex_lock(&cxt->pstore_zone_info_lock);
+ if (cxt->pstore_zone_info) {
+ pr_warn("'%s' already loaded: ignoring '%s'\n",
+ cxt->pstore_zone_info->name, info->name);
+ mutex_unlock(&cxt->pstore_zone_info_lock);
+ return -EBUSY;
+ }
+ cxt->pstore_zone_info = info;
+
+ pr_debug("register %s with properties:\n", info->name);
+ pr_debug("\ttotal size : %ld Bytes\n", info->total_size);
+ pr_debug("\tkmsg size : %ld Bytes\n", info->kmsg_size);
+ pr_debug("\tpmsg size : %ld Bytes\n", info->pmsg_size);
+ pr_debug("\tconsole size : %ld Bytes\n", info->console_size);
+ pr_debug("\tftrace size : %ld Bytes\n", info->ftrace_size);
+
+ err = psz_alloc_zones(cxt);
+ if (err) {
+ pr_err("alloc zones failed\n");
+ goto fail_out;
+ }
+
+ if (info->kmsg_size) {
+ cxt->pstore.bufsize = cxt->kpszs[0]->buffer_size -
+ sizeof(struct psz_kmsg_header);
+ cxt->pstore.buf = kzalloc(cxt->pstore.bufsize, GFP_KERNEL);
+ if (!cxt->pstore.buf) {
+ err = -ENOMEM;
+ goto fail_free;
+ }
+ }
+ cxt->pstore.data = cxt;
+
+ pr_info("registered %s as backend for", info->name);
+ cxt->pstore.max_reason = info->max_reason;
+ cxt->pstore.name = info->name;
+ if (info->kmsg_size) {
+ cxt->pstore.flags |= PSTORE_FLAGS_DMESG;
+ pr_cont(" kmsg(%s",
+ kmsg_dump_reason_str(cxt->pstore.max_reason));
+ if (cxt->pstore_zone_info->panic_write)
+ pr_cont(",panic_write");
+ pr_cont(")");
+ }
+ if (info->pmsg_size) {
+ cxt->pstore.flags |= PSTORE_FLAGS_PMSG;
+ pr_cont(" pmsg");
+ }
+ if (info->console_size) {
+ cxt->pstore.flags |= PSTORE_FLAGS_CONSOLE;
+ pr_cont(" console");
+ }
+ if (info->ftrace_size) {
+ cxt->pstore.flags |= PSTORE_FLAGS_FTRACE;
+ pr_cont(" ftrace");
+ }
+ pr_cont("\n");
+
+ err = pstore_register(&cxt->pstore);
+ if (err) {
+ pr_err("registering with pstore failed\n");
+ goto fail_free;
+ }
+ mutex_unlock(&pstore_zone_cxt.pstore_zone_info_lock);
+
+ return 0;
+
+fail_free:
+ kfree(cxt->pstore.buf);
+ cxt->pstore.buf = NULL;
+ cxt->pstore.bufsize = 0;
+ psz_free_all_zones(cxt);
+fail_out:
+ pstore_zone_cxt.pstore_zone_info = NULL;
+ mutex_unlock(&pstore_zone_cxt.pstore_zone_info_lock);
+ return err;
+}
+EXPORT_SYMBOL_GPL(register_pstore_zone);
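+
+/*
+ * A minimal registration sketch for a backend driver (illustrative
+ * only: "mydev" and its callbacks are hypothetical; the fields shown
+ * are the ones this file dereferences):
+ *
+ * static struct pstore_zone_info mydev_info = {
+ * .name = "mydev",
+ * .total_size = 1024 * 1024,
+ * .kmsg_size = 64 * 1024,
+ * .max_reason = KMSG_DUMP_OOPS,
+ * .read = mydev_read,
+ * .write = mydev_write,
+ * };
+ *
+ * return register_pstore_zone(&mydev_info);
+ */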
+
+/**
+ * unregister_pstore_zone() - unregister a backend from pstore/zone
+ *
+ * @info: backend driver information. See struct pstore_zone_info.
+ */
+void unregister_pstore_zone(struct pstore_zone_info *info)
+{
+ struct psz_context *cxt = &pstore_zone_cxt;
+
+ mutex_lock(&cxt->pstore_zone_info_lock);
+ if (!cxt->pstore_zone_info) {
+ mutex_unlock(&cxt->pstore_zone_info_lock);
+ return;
+ }
+
+ /* Stop incoming writes from pstore. */
+ pstore_unregister(&cxt->pstore);
+
+ /* Flush any pending writes. */
+ psz_flush_all_dirty_zones(NULL);
+ flush_delayed_work(&psz_cleaner);
+
+ /* Clean up allocations. */
+ kfree(cxt->pstore.buf);
+ cxt->pstore.buf = NULL;
+ cxt->pstore.bufsize = 0;
+ cxt->pstore_zone_info = NULL;
+
+ psz_free_all_zones(cxt);
+
+ /* Clear counters and zone state. */
+ cxt->oops_counter = 0;
+ cxt->panic_counter = 0;
+ atomic_set(&cxt->recovered, 0);
+ atomic_set(&cxt->on_panic, 0);
+
+ mutex_unlock(&cxt->pstore_zone_info_lock);
+}
+EXPORT_SYMBOL_GPL(unregister_pstore_zone);
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("WeiXiong Liao <liaoweixiong@allwinnertech.com>");
+MODULE_AUTHOR("Kees Cook <keescook@chromium.org>");
+MODULE_DESCRIPTION("Storage Manager for pstore/blk");