aboutsummaryrefslogtreecommitdiff
diff options
context:
space:
mode:
authorSrinivas Kandagatla <srinivas.kandagatla@linaro.org>2016-09-28 01:33:41 +0100
committerSrinivas Kandagatla <srinivas.kandagatla@linaro.org>2016-09-28 01:33:41 +0100
commit7e161bc63f89ab823a44edb1fc84d74579491796 (patch)
treeea07fe883f86b73b2b6aeff9865ef153def6433f
parentcfd9144af147bc93073f9c85ff24541cffe1f0b9 (diff)
parent3b03b9f27cf1c3217fdef58a7bd429c481432121 (diff)
Merge branch 'tracking-qcomlt-iommu' into v4.8-linuxv4.8-linux-for-rob
-rw-r--r--Documentation/devicetree/bindings/iommu/arm,smmu.txt52
-rw-r--r--Documentation/devicetree/bindings/pci/pci-iommu.txt171
-rw-r--r--arch/arm/mm/dma-mapping.c9
-rw-r--r--arch/arm64/include/asm/cacheflush.h6
-rw-r--r--arch/arm64/mm/cache.S4
-rw-r--r--drivers/base/platform.c13
-rw-r--r--drivers/gpu/host1x/bus.c2
-rw-r--r--drivers/iommu/Kconfig2
-rw-r--r--drivers/iommu/Makefile1
-rw-r--r--drivers/iommu/arm-smmu.c818
-rw-r--r--drivers/iommu/dma-iommu.c4
-rw-r--r--drivers/iommu/iommu.c12
-rw-r--r--drivers/iommu/msm_iommu.c17
-rw-r--r--drivers/iommu/msm_iommu.h3
-rw-r--r--drivers/iommu/of_iommu.c59
-rw-r--r--drivers/iommu/qcom/Kconfig44
-rw-r--r--drivers/iommu/qcom/Makefile7
-rw-r--r--drivers/iommu/qcom/msm_iommu-v1.c1540
-rw-r--r--drivers/iommu/qcom/msm_iommu.c206
-rw-r--r--drivers/iommu/qcom/msm_iommu_dev-v1.c708
-rw-r--r--drivers/iommu/qcom/msm_iommu_hw-v1.h2320
-rw-r--r--drivers/iommu/qcom/msm_iommu_pagetable.c645
-rw-r--r--drivers/iommu/qcom/msm_iommu_pagetable.h33
-rw-r--r--drivers/iommu/qcom/msm_iommu_perfmon.h233
-rw-r--r--drivers/iommu/qcom/msm_iommu_priv.h71
-rw-r--r--drivers/iommu/qcom/msm_iommu_sec.c795
-rw-r--r--drivers/of/address.c20
-rw-r--r--drivers/of/device.c77
-rw-r--r--drivers/of/irq.c70
-rw-r--r--drivers/of/of_pci.c102
-rw-r--r--drivers/of/platform.c16
-rw-r--r--drivers/pci/probe.c3
-rw-r--r--include/linux/iommu.h1
-rw-r--r--include/linux/msm_iommu_domains.h239
-rw-r--r--include/linux/of_device.h14
-rw-r--r--include/linux/of_pci.h8
-rw-r--r--include/linux/qcom_iommu.h388
-rw-r--r--include/soc/qcom/socinfo.h610
38 files changed, 8815 insertions, 508 deletions
diff --git a/Documentation/devicetree/bindings/iommu/arm,smmu.txt b/Documentation/devicetree/bindings/iommu/arm,smmu.txt
index 19fe6f2c83f61..eb23442cc7832 100644
--- a/Documentation/devicetree/bindings/iommu/arm,smmu.txt
+++ b/Documentation/devicetree/bindings/iommu/arm,smmu.txt
@@ -35,12 +35,11 @@ conditions.
interrupt per context bank. In the case of a single,
combined interrupt, it must be listed multiple times.
-- mmu-masters : A list of phandles to device nodes representing bus
- masters for which the SMMU can provide a translation
- and their corresponding StreamIDs (see example below).
- Each device node linked from this list must have a
- "#stream-id-cells" property, indicating the number of
- StreamIDs associated with it.
+- #iommu-cells : See Documentation/devicetree/bindings/iommu/iommu.txt
+ for details. Should be 1, where each "iommus" entry on
+ the device represents a distinct stream ID emitted by
+ that device into the relevant SMMU. Cells beyond 1 are
+ reserved for future use.
** System MMU optional properties:
@@ -55,10 +54,34 @@ conditions.
SMMU configuration registers. In this case non-secure
aliases of secure registers have to be used during
SMMU configuration.
+- qcom,skip-init : Disable resetting configuration for all context banks
+ during device reset. This is useful for targets where
+ some context banks are dedicated to other execution
+ environments outside of Linux and those other EEs are
+ programming their own stream match tables, SCTLR, etc.
+ Without setting this option we will trample on their
+ configuration.
+- qcom,no-smr-check : Usually when an SMMU probes we do a sanity check on
+ the SMR registers to make sure they can fully support all
+ of the mask bits. This check can cause problems for use
+ cases where the SMMU is already in use when the SMMU
+ probes. For example, for continuous splash screen
+ support, the stream matching table is programmed before
+ control is even turned over to Linux.
+
+** Deprecated properties:
+
+- mmu-masters (deprecated in favour of the generic "iommus" binding) :
+ A list of phandles to device nodes representing bus
+ masters for which the SMMU can provide a translation
+ and their corresponding StreamIDs (see example below).
+ Each device node linked from this list must have a
+ "#stream-id-cells" property, indicating the number of
+ StreamIDs associated with it.
-Example:
+** Example:
- smmu {
+ smmu1: iommu {
compatible = "arm,smmu-v1";
reg = <0xba5e0000 0x10000>;
#global-interrupts = <2>;
@@ -68,11 +91,12 @@ Example:
<0 35 4>,
<0 36 4>,
<0 37 4>;
+ #iommu-cells = <1>;
+ };
- /*
- * Two DMA controllers, the first with two StreamIDs (0xd01d
- * and 0xd01e) and the second with only one (0xd11c).
- */
- mmu-masters = <&dma0 0xd01d 0xd01e>,
- <&dma1 0xd11c>;
+ /* device with two stream IDs, 0 and 7 */
+ master {
+ iommus = <&smmu1 0>,
+ <&smmu1 7>;
};
+
diff --git a/Documentation/devicetree/bindings/pci/pci-iommu.txt b/Documentation/devicetree/bindings/pci/pci-iommu.txt
new file mode 100644
index 0000000000000..56c829621b9a8
--- /dev/null
+++ b/Documentation/devicetree/bindings/pci/pci-iommu.txt
@@ -0,0 +1,171 @@
+This document describes the generic device tree binding for describing the
+relationship between PCI(e) devices and IOMMU(s).
+
+Each PCI(e) device under a root complex is uniquely identified by its Requester
+ID (AKA RID). A Requester ID is a triplet of a Bus number, Device number, and
+Function number.
+
+For the purpose of this document, when treated as a numeric value, a RID is
+formatted such that:
+
+* Bits [15:8] are the Bus number.
+* Bits [7:3] are the Device number.
+* Bits [2:0] are the Function number.
+* Any other bits required for padding must be zero.
+
+IOMMUs may distinguish PCI devices through sideband data derived from the
+Requester ID. While a given PCI device can only master through one IOMMU, a
+root complex may split masters across a set of IOMMUs (e.g. with one IOMMU per
+bus).
+
+The generic 'iommus' property is insufficient to describe this relationship,
+and a mechanism is required to map from a PCI device to its IOMMU and sideband
+data.
+
+For generic IOMMU bindings, see
+Documentation/devicetree/bindings/iommu/iommu.txt.
+
+
+PCI root complex
+================
+
+Optional properties
+-------------------
+
+- iommu-map: Maps a Requester ID to an IOMMU and associated iommu-specifier
+ data.
+
+ The property is an arbitrary number of tuples of
+ (rid-base,iommu,iommu-base,length).
+
+ Any RID r in the interval [rid-base, rid-base + length) is associated with
+ the listed IOMMU, with the iommu-specifier (r - rid-base + iommu-base).
+
+- iommu-map-mask: A mask to be applied to each Requester ID prior to being
+ mapped to an iommu-specifier per the iommu-map property.
+
+
+Example (1)
+===========
+
+/ {
+ #address-cells = <1>;
+ #size-cells = <1>;
+
+ iommu: iommu@a {
+ reg = <0xa 0x1>;
+ compatible = "vendor,some-iommu";
+ #iommu-cells = <1>;
+ };
+
+ pci: pci@f {
+ reg = <0xf 0x1>;
+ compatible = "vendor,pcie-root-complex";
+ device_type = "pci";
+
+ /*
+ * The sideband data provided to the IOMMU is the RID,
+ * identity-mapped.
+ */
+ iommu-map = <0x0 &iommu 0x0 0x10000>;
+ };
+};
+
+
+Example (2)
+===========
+
+/ {
+ #address-cells = <1>;
+ #size-cells = <1>;
+
+ iommu: iommu@a {
+ reg = <0xa 0x1>;
+ compatible = "vendor,some-iommu";
+ #iommu-cells = <1>;
+ };
+
+ pci: pci@f {
+ reg = <0xf 0x1>;
+ compatible = "vendor,pcie-root-complex";
+ device_type = "pci";
+
+ /*
+ * The sideband data provided to the IOMMU is the RID with the
+ * function bits masked out.
+ */
+ iommu-map = <0x0 &iommu 0x0 0x10000>;
+ iommu-map-mask = <0xfff8>;
+ };
+};
+
+
+Example (3)
+===========
+
+/ {
+ #address-cells = <1>;
+ #size-cells = <1>;
+
+ iommu: iommu@a {
+ reg = <0xa 0x1>;
+ compatible = "vendor,some-iommu";
+ #iommu-cells = <1>;
+ };
+
+ pci: pci@f {
+ reg = <0xf 0x1>;
+ compatible = "vendor,pcie-root-complex";
+ device_type = "pci";
+
+ /*
+ * The sideband data provided to the IOMMU is the RID,
+ * but the high bits of the bus number are flipped.
+ */
+ iommu-map = <0x0000 &iommu 0x8000 0x8000>,
+ <0x8000 &iommu 0x0000 0x8000>;
+ };
+};
+
+
+Example (4)
+===========
+
+/ {
+ #address-cells = <1>;
+ #size-cells = <1>;
+
+ iommu_a: iommu@a {
+ reg = <0xa 0x1>;
+ compatible = "vendor,some-iommu";
+ #iommu-cells = <1>;
+ };
+
+ iommu_b: iommu@b {
+ reg = <0xb 0x1>;
+ compatible = "vendor,some-iommu";
+ #iommu-cells = <1>;
+ };
+
+ iommu_c: iommu@c {
+ reg = <0xc 0x1>;
+ compatible = "vendor,some-iommu";
+ #iommu-cells = <1>;
+ };
+
+ pci: pci@f {
+ reg = <0xf 0x1>;
+ compatible = "vendor,pcie-root-complex";
+ device_type = "pci";
+
+ /*
+ * Devices with bus number 0-127 are mastered via IOMMU
+ * a, with sideband data being RID[14:0].
+ * Devices with bus number 128-255 are mastered via
+ * IOMMU b, with sideband data being RID[14:0].
+ * No devices master via IOMMU c.
+ */
+ iommu-map = <0x0000 &iommu_a 0x0000 0x8000>,
+ <0x8000 &iommu_b 0x0000 0x8000>;
+ };
+};
diff --git a/arch/arm/mm/dma-mapping.c b/arch/arm/mm/dma-mapping.c
index c6834c0cfd1cf..dde65144ad730 100644
--- a/arch/arm/mm/dma-mapping.c
+++ b/arch/arm/mm/dma-mapping.c
@@ -2322,6 +2322,15 @@ void arch_setup_dma_ops(struct device *dev, u64 dma_base, u64 size,
struct dma_map_ops *dma_ops;
dev->archdata.dma_coherent = coherent;
+
+ /*
+ * Don't override the dma_ops if they have already been set. Ideally
+ * this should be the only location where dma_ops are set, remove this
+ * check when all other callers of set_dma_ops will have disappeared.
+ */
+ if (dev->archdata.dma_ops)
+ return;
+
if (arm_setup_iommu_dma_ops(dev, dma_base, size, iommu))
dma_ops = arm_get_iommu_dma_map_ops(coherent);
else
diff --git a/arch/arm64/include/asm/cacheflush.h b/arch/arm64/include/asm/cacheflush.h
index c64268dbff64c..32a2d9b98effb 100644
--- a/arch/arm64/include/asm/cacheflush.h
+++ b/arch/arm64/include/asm/cacheflush.h
@@ -86,6 +86,12 @@ static inline void flush_cache_page(struct vm_area_struct *vma,
extern void __dma_map_area(const void *, size_t, int);
extern void __dma_unmap_area(const void *, size_t, int);
extern void __dma_flush_range(const void *, const void *);
+extern void __dma_inv_range(const void *, const void *);
+extern void __dma_clean_range(const void *, const void *);
+
+#define dmac_flush_range __dma_flush_range
+#define dmac_inv_range __dma_inv_range
+#define dmac_clean_range __dma_clean_range
/*
* Copy user data from/to a page which is mapped into a different
diff --git a/arch/arm64/mm/cache.S b/arch/arm64/mm/cache.S
index 07d7352d7c389..b68082ba07957 100644
--- a/arch/arm64/mm/cache.S
+++ b/arch/arm64/mm/cache.S
@@ -117,7 +117,7 @@ ENTRY(__inval_cache_range)
* - start - virtual start address of region
* - end - virtual end address of region
*/
-__dma_inv_range:
+ENTRY(__dma_inv_range)
dcache_line_size x2, x3
sub x3, x2, #1
tst x1, x3 // end cache line aligned?
@@ -143,7 +143,7 @@ ENDPROC(__dma_inv_range)
* - start - virtual start address of region
* - end - virtual end address of region
*/
-__dma_clean_range:
+ENTRY(__dma_clean_range)
dcache_line_size x2, x3
sub x3, x2, #1
bic x0, x0, x3
diff --git a/drivers/base/platform.c b/drivers/base/platform.c
index 6482d47deb507..01a4ca79f05a6 100644
--- a/drivers/base/platform.c
+++ b/drivers/base/platform.c
@@ -558,6 +558,12 @@ static int platform_drv_probe(struct device *_dev)
if (ret < 0)
return ret;
+ if (of_have_populated_dt()) {
+ ret = of_dma_configure_ops(_dev, _dev->of_node);
+ if (ret < 0)
+ goto done;
+ }
+
ret = dev_pm_domain_attach(_dev, true);
if (ret != -EPROBE_DEFER) {
if (drv->probe) {
@@ -570,6 +576,11 @@ static int platform_drv_probe(struct device *_dev)
}
}
+ if (of_have_populated_dt()) {
+ if (ret)
+ of_dma_deconfigure(_dev);
+ }
+done:
if (drv->prevent_deferred_probe && ret == -EPROBE_DEFER) {
dev_warn(_dev, "probe deferral not supported\n");
ret = -ENXIO;
@@ -592,6 +603,8 @@ static int platform_drv_remove(struct device *_dev)
if (drv->remove)
ret = drv->remove(dev);
dev_pm_domain_detach(_dev, true);
+ if (of_have_populated_dt())
+ of_dma_deconfigure(_dev);
return ret;
}
diff --git a/drivers/gpu/host1x/bus.c b/drivers/gpu/host1x/bus.c
index c27858ae05529..9e4c82370d533 100644
--- a/drivers/gpu/host1x/bus.c
+++ b/drivers/gpu/host1x/bus.c
@@ -397,7 +397,7 @@ static int host1x_device_add(struct host1x *host1x,
device->dev.coherent_dma_mask = host1x->dev->coherent_dma_mask;
device->dev.dma_mask = &device->dev.coherent_dma_mask;
dev_set_name(&device->dev, "%s", driver->driver.name);
- of_dma_configure(&device->dev, host1x->dev->of_node);
+ //of_dma_configure(&device->dev, host1x->dev->of_node);
device->dev.release = host1x_device_release;
device->dev.bus = &host1x_bus_type;
device->dev.parent = host1x->dev;
diff --git a/drivers/iommu/Kconfig b/drivers/iommu/Kconfig
index d432ca828472c..fa542d26799c8 100644
--- a/drivers/iommu/Kconfig
+++ b/drivers/iommu/Kconfig
@@ -102,6 +102,8 @@ config IOMMU_PGTABLES_L2
def_bool y
depends on MSM_IOMMU && MMU && SMP && CPU_DCACHE_DISABLE=n
+source "drivers/iommu/qcom/Kconfig"
+
# AMD IOMMU support
config AMD_IOMMU
bool "AMD IOMMU support"
diff --git a/drivers/iommu/Makefile b/drivers/iommu/Makefile
index 195f7b997d8e9..c74d5f45d4093 100644
--- a/drivers/iommu/Makefile
+++ b/drivers/iommu/Makefile
@@ -8,6 +8,7 @@ obj-$(CONFIG_IOMMU_IO_PGTABLE_LPAE) += io-pgtable-arm.o
obj-$(CONFIG_IOMMU_IOVA) += iova.o
obj-$(CONFIG_OF_IOMMU) += of_iommu.o
obj-$(CONFIG_MSM_IOMMU) += msm_iommu.o
+obj-$(CONFIG_QCOM_IOMMU_V1) += qcom/
obj-$(CONFIG_AMD_IOMMU) += amd_iommu.o amd_iommu_init.o
obj-$(CONFIG_AMD_IOMMU_V2) += amd_iommu_v2.o
obj-$(CONFIG_ARM_SMMU) += arm-smmu.o
diff --git a/drivers/iommu/arm-smmu.c b/drivers/iommu/arm-smmu.c
index 54c2ad904d585..6f9648c196c98 100644
--- a/drivers/iommu/arm-smmu.c
+++ b/drivers/iommu/arm-smmu.c
@@ -40,8 +40,11 @@
#include <linux/module.h>
#include <linux/of.h>
#include <linux/of_address.h>
+#include <linux/of_iommu.h>
+#include <linux/of_platform.h>
#include <linux/pci.h>
#include <linux/platform_device.h>
+#include <linux/pm_runtime.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
@@ -296,21 +299,34 @@ enum arm_smmu_implementation {
CAVIUM_SMMUV2,
};
-struct arm_smmu_smr {
- u8 idx;
+#define STREAM_UNASSIGNED 0
+#define STREAM_MULTIPLE -1
+struct arm_smmu_streamid {
+ int s2cr_idx;
+ u16 mask;
+ u16 id;
+};
+
+struct arm_smmu_stream_map_entry {
u16 mask;
u16 id;
};
struct arm_smmu_master_cfg {
+ struct arm_smmu_device *smmu;
+ struct arm_smmu_domain *smmu_domain;
int num_streamids;
- u16 streamids[MAX_MASTER_STREAMIDS];
- struct arm_smmu_smr *smrs;
+ struct arm_smmu_streamid streamids[MAX_MASTER_STREAMIDS];
+};
+
+struct arm_smmu_group_cfg {
+ int num_smes;
+ struct arm_smmu_stream_map_entry smes[MAX_MASTER_STREAMIDS];
};
struct arm_smmu_master {
struct device_node *of_node;
- struct rb_node node;
+ struct list_head list;
struct arm_smmu_master_cfg cfg;
};
@@ -336,6 +352,8 @@ struct arm_smmu_device {
u32 features;
#define ARM_SMMU_OPT_SECURE_CFG_ACCESS (1 << 0)
+#define ARM_SMMU_OPT_SKIP_INIT (1 << 1)
+#define ARM_SMMU_OPT_NO_SMR_CHECK (1 << 2)
u32 options;
enum arm_smmu_arch_version version;
enum arm_smmu_implementation model;
@@ -347,6 +365,7 @@ struct arm_smmu_device {
u32 num_mapping_groups;
DECLARE_BITMAP(smr_map, ARM_SMMU_MAX_SMRS);
+ struct iommu_group **group_lut;
unsigned long va_size;
unsigned long ipa_size;
@@ -406,8 +425,8 @@ struct arm_smmu_phandle_args {
uint32_t args[MAX_MASTER_STREAMIDS];
};
-static DEFINE_SPINLOCK(arm_smmu_devices_lock);
-static LIST_HEAD(arm_smmu_devices);
+static DEFINE_RWLOCK(arm_smmu_masters_lock);
+static LIST_HEAD(arm_smmu_masters);
struct arm_smmu_option_prop {
u32 opt;
@@ -418,6 +437,8 @@ static atomic_t cavium_smmu_context_count = ATOMIC_INIT(0);
static struct arm_smmu_option_prop arm_smmu_options[] = {
{ ARM_SMMU_OPT_SECURE_CFG_ACCESS, "calxeda,smmu-secure-config-access" },
+ { ARM_SMMU_OPT_SKIP_INIT, "qcom,skip-init" },
+ { ARM_SMMU_OPT_NO_SMR_CHECK, "qcom,no-smr-check" },
{ 0, NULL},
};
@@ -479,125 +500,23 @@ static struct device_node *dev_get_dev_node(struct device *dev)
return dev->of_node;
}
-static struct arm_smmu_master *find_smmu_master(struct arm_smmu_device *smmu,
- struct device_node *dev_node)
-{
- struct rb_node *node = smmu->masters.rb_node;
-
- while (node) {
- struct arm_smmu_master *master;
-
- master = container_of(node, struct arm_smmu_master, node);
-
- if (dev_node < master->of_node)
- node = node->rb_left;
- else if (dev_node > master->of_node)
- node = node->rb_right;
- else
- return master;
- }
-
- return NULL;
-}
-
-static struct arm_smmu_master_cfg *
-find_smmu_master_cfg(struct device *dev)
-{
- struct arm_smmu_master_cfg *cfg = NULL;
- struct iommu_group *group = iommu_group_get(dev);
-
- if (group) {
- cfg = iommu_group_get_iommudata(group);
- iommu_group_put(group);
- }
-
- return cfg;
-}
-
-static int insert_smmu_master(struct arm_smmu_device *smmu,
- struct arm_smmu_master *master)
+static struct arm_smmu_master *find_smmu_master(struct device_node *dev_node)
{
- struct rb_node **new, *parent;
-
- new = &smmu->masters.rb_node;
- parent = NULL;
- while (*new) {
- struct arm_smmu_master *this
- = container_of(*new, struct arm_smmu_master, node);
-
- parent = *new;
- if (master->of_node < this->of_node)
- new = &((*new)->rb_left);
- else if (master->of_node > this->of_node)
- new = &((*new)->rb_right);
- else
- return -EEXIST;
- }
-
- rb_link_node(&master->node, parent, new);
- rb_insert_color(&master->node, &smmu->masters);
- return 0;
-}
-
-static int register_smmu_master(struct arm_smmu_device *smmu,
- struct device *dev,
- struct arm_smmu_phandle_args *masterspec)
-{
- int i;
struct arm_smmu_master *master;
- master = find_smmu_master(smmu, masterspec->np);
- if (master) {
- dev_err(dev,
- "rejecting multiple registrations for master device %s\n",
- masterspec->np->name);
- return -EBUSY;
- }
-
- if (masterspec->args_count > MAX_MASTER_STREAMIDS) {
- dev_err(dev,
- "reached maximum number (%d) of stream IDs for master device %s\n",
- MAX_MASTER_STREAMIDS, masterspec->np->name);
- return -ENOSPC;
- }
-
- master = devm_kzalloc(dev, sizeof(*master), GFP_KERNEL);
- if (!master)
- return -ENOMEM;
-
- master->of_node = masterspec->np;
- master->cfg.num_streamids = masterspec->args_count;
-
- for (i = 0; i < master->cfg.num_streamids; ++i) {
- u16 streamid = masterspec->args[i];
-
- if (!(smmu->features & ARM_SMMU_FEAT_STREAM_MATCH) &&
- (streamid >= smmu->num_mapping_groups)) {
- dev_err(dev,
- "stream ID for master device %s greater than maximum allowed (%d)\n",
- masterspec->np->name, smmu->num_mapping_groups);
- return -ERANGE;
- }
- master->cfg.streamids[i] = streamid;
- }
- return insert_smmu_master(smmu, master);
-}
+ if (!dev_node)
+ return NULL;
-static struct arm_smmu_device *find_smmu_for_device(struct device *dev)
-{
- struct arm_smmu_device *smmu;
- struct arm_smmu_master *master = NULL;
- struct device_node *dev_node = dev_get_dev_node(dev);
+ read_lock(&arm_smmu_masters_lock);
+ list_for_each_entry(master, &arm_smmu_masters, list)
+ if (master->of_node == dev_node)
+ goto out_unlock;
- spin_lock(&arm_smmu_devices_lock);
- list_for_each_entry(smmu, &arm_smmu_devices, list) {
- master = find_smmu_master(smmu, dev_node);
- if (master)
- break;
- }
- spin_unlock(&arm_smmu_devices_lock);
+ master = NULL;
- return master ? smmu : NULL;
+out_unlock:
+ read_unlock(&arm_smmu_masters_lock);
+ return master;
}
static int __arm_smmu_alloc_bitmap(unsigned long *map, int start, int end)
@@ -1101,147 +1020,132 @@ static void arm_smmu_domain_free(struct iommu_domain *domain)
kfree(smmu_domain);
}
-static int arm_smmu_master_configure_smrs(struct arm_smmu_device *smmu,
- struct arm_smmu_master_cfg *cfg)
+static int arm_smmu_master_configure_smrs(struct arm_smmu_master_cfg *cfg)
{
- int i;
- struct arm_smmu_smr *smrs;
+ int i, err;
+ struct arm_smmu_device *smmu = cfg->smmu;
void __iomem *gr0_base = ARM_SMMU_GR0(smmu);
+ bool stream_match = smmu->features & ARM_SMMU_FEAT_STREAM_MATCH;
- if (!(smmu->features & ARM_SMMU_FEAT_STREAM_MATCH))
- return 0;
+ /* Allocate the SMRs on the SMMU */
+ for (i = 0; i < cfg->num_streamids; i++) {
+ int idx;
- if (cfg->smrs)
- return -EEXIST;
+ if (cfg->streamids[i].s2cr_idx == STREAM_MULTIPLE)
+ continue;
- smrs = kmalloc_array(cfg->num_streamids, sizeof(*smrs), GFP_KERNEL);
- if (!smrs) {
- dev_err(smmu->dev, "failed to allocate %d SMRs\n",
- cfg->num_streamids);
- return -ENOMEM;
- }
+ if (cfg->streamids[i].s2cr_idx > STREAM_UNASSIGNED) {
+ err = -EEXIST;
+ goto err_free_smrs;
+ }
+
+ if (stream_match)
+ idx = __arm_smmu_alloc_bitmap(smmu->smr_map, 0,
+ smmu->num_mapping_groups);
+ else
+ idx = cfg->streamids[i].id;
- /* Allocate the SMRs on the SMMU */
- for (i = 0; i < cfg->num_streamids; ++i) {
- int idx = __arm_smmu_alloc_bitmap(smmu->smr_map, 0,
- smmu->num_mapping_groups);
if (idx < 0) {
dev_err(smmu->dev, "failed to allocate free SMR\n");
+ err = -ENOSPC;
goto err_free_smrs;
}
- smrs[i] = (struct arm_smmu_smr) {
- .idx = idx,
- .mask = 0, /* We don't currently share SMRs */
- .id = cfg->streamids[i],
- };
+ cfg->streamids[i].s2cr_idx = idx + 1;
}
+ /* For stream indexing, we're only here for the bookkeeping... */
+ if (!stream_match)
+ return 0;
+
/* It worked! Now, poke the actual hardware */
for (i = 0; i < cfg->num_streamids; ++i) {
- u32 reg = SMR_VALID | smrs[i].id << SMR_ID_SHIFT |
- smrs[i].mask << SMR_MASK_SHIFT;
- writel_relaxed(reg, gr0_base + ARM_SMMU_GR0_SMR(smrs[i].idx));
+ u32 idx = cfg->streamids[i].s2cr_idx - 1;
+ u32 reg = SMR_VALID | cfg->streamids[i].id << SMR_ID_SHIFT |
+ cfg->streamids[i].mask << SMR_MASK_SHIFT;
+
+ if (idx != STREAM_MULTIPLE)
+ writel_relaxed(reg, gr0_base + ARM_SMMU_GR0_SMR(idx));
}
- cfg->smrs = smrs;
return 0;
err_free_smrs:
- while (--i >= 0)
- __arm_smmu_free_bitmap(smmu->smr_map, smrs[i].idx);
- kfree(smrs);
- return -ENOSPC;
+ while (--i >= 0) {
+ int idx = cfg->streamids[i].s2cr_idx - 1;
+
+ if (idx != STREAM_MULTIPLE) {
+ if (stream_match)
+ __arm_smmu_free_bitmap(smmu->smr_map, idx);
+ cfg->streamids[i].s2cr_idx = STREAM_UNASSIGNED;
+ }
+ }
+ return err;
}
-static void arm_smmu_master_free_smrs(struct arm_smmu_device *smmu,
- struct arm_smmu_master_cfg *cfg)
+static void arm_smmu_master_free_smrs(struct arm_smmu_master_cfg *cfg)
{
int i;
+ struct arm_smmu_device *smmu = cfg->smmu;
void __iomem *gr0_base = ARM_SMMU_GR0(smmu);
- struct arm_smmu_smr *smrs = cfg->smrs;
-
- if (!smrs)
- return;
+ bool stream_match = smmu->features & ARM_SMMU_FEAT_STREAM_MATCH;
/* Invalidate the SMRs before freeing back to the allocator */
for (i = 0; i < cfg->num_streamids; ++i) {
- u8 idx = smrs[i].idx;
+ u8 idx = cfg->streamids[i].s2cr_idx - 1;
+
+ if (cfg->streamids[i].s2cr_idx == STREAM_MULTIPLE)
+ continue;
+
+ cfg->streamids[i].s2cr_idx = STREAM_UNASSIGNED;
+ if (!stream_match)
+ continue;
writel_relaxed(~SMR_VALID, gr0_base + ARM_SMMU_GR0_SMR(idx));
__arm_smmu_free_bitmap(smmu->smr_map, idx);
}
-
- cfg->smrs = NULL;
- kfree(smrs);
}
static int arm_smmu_domain_add_master(struct arm_smmu_domain *smmu_domain,
struct arm_smmu_master_cfg *cfg)
{
int i, ret;
- struct arm_smmu_device *smmu = smmu_domain->smmu;
- void __iomem *gr0_base = ARM_SMMU_GR0(smmu);
-
- /*
- * FIXME: This won't be needed once we have IOMMU-backed DMA ops
- * for all devices behind the SMMU. Note that we need to take
- * care configuring SMRs for devices both a platform_device and
- * and a PCI device (i.e. a PCI host controller)
- */
- if (smmu_domain->domain.type == IOMMU_DOMAIN_DMA)
- return 0;
+ void __iomem *gr0_base = ARM_SMMU_GR0(cfg->smmu);
/* Devices in an IOMMU group may already be configured */
- ret = arm_smmu_master_configure_smrs(smmu, cfg);
+ ret = arm_smmu_master_configure_smrs(cfg);
if (ret)
return ret == -EEXIST ? 0 : ret;
for (i = 0; i < cfg->num_streamids; ++i) {
- u32 idx, s2cr;
+ u32 idx = cfg->streamids[i].s2cr_idx - 1;
+ u32 s2cr = S2CR_TYPE_TRANS | S2CR_PRIVCFG_UNPRIV |
+ (smmu_domain->cfg.cbndx << S2CR_CBNDX_SHIFT);
- idx = cfg->smrs ? cfg->smrs[i].idx : cfg->streamids[i];
- s2cr = S2CR_TYPE_TRANS | S2CR_PRIVCFG_UNPRIV |
- (smmu_domain->cfg.cbndx << S2CR_CBNDX_SHIFT);
writel_relaxed(s2cr, gr0_base + ARM_SMMU_GR0_S2CR(idx));
}
return 0;
}
-static void arm_smmu_domain_remove_master(struct arm_smmu_domain *smmu_domain,
- struct arm_smmu_master_cfg *cfg)
+static void arm_smmu_domain_remove_master(struct arm_smmu_master_cfg *cfg)
{
int i;
- struct arm_smmu_device *smmu = smmu_domain->smmu;
- void __iomem *gr0_base = ARM_SMMU_GR0(smmu);
-
- /* An IOMMU group is torn down by the first device to be removed */
- if ((smmu->features & ARM_SMMU_FEAT_STREAM_MATCH) && !cfg->smrs)
- return;
+ void __iomem *gr0_base = ARM_SMMU_GR0(cfg->smmu);
/*
* We *must* clear the S2CR first, because freeing the SMR means
* that it can be re-allocated immediately.
*/
for (i = 0; i < cfg->num_streamids; ++i) {
- u32 idx = cfg->smrs ? cfg->smrs[i].idx : cfg->streamids[i];
+ u32 idx = cfg->streamids[i].s2cr_idx - 1;
u32 reg = disable_bypass ? S2CR_TYPE_FAULT : S2CR_TYPE_BYPASS;
writel_relaxed(reg, gr0_base + ARM_SMMU_GR0_S2CR(idx));
}
- arm_smmu_master_free_smrs(smmu, cfg);
-}
-
-static void arm_smmu_detach_dev(struct device *dev,
- struct arm_smmu_master_cfg *cfg)
-{
- struct iommu_domain *domain = dev->archdata.iommu;
- struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
-
- dev->archdata.iommu = NULL;
- arm_smmu_domain_remove_master(smmu_domain, cfg);
+ cfg->smmu_domain = NULL;
+ arm_smmu_master_free_smrs(cfg);
}
static int arm_smmu_attach_dev(struct iommu_domain *domain, struct device *dev)
@@ -1249,14 +1153,14 @@ static int arm_smmu_attach_dev(struct iommu_domain *domain, struct device *dev)
int ret;
struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
struct arm_smmu_device *smmu;
- struct arm_smmu_master_cfg *cfg;
+ struct arm_smmu_master_cfg *cfg = dev->archdata.iommu;
- smmu = find_smmu_for_device(dev);
- if (!smmu) {
+ if (!cfg) {
dev_err(dev, "cannot attach to SMMU, is it on the same bus?\n");
return -ENXIO;
}
+ smmu = cfg->smmu;
/* Ensure that the domain is finalised */
ret = arm_smmu_init_domain_context(domain, smmu);
if (ret < 0)
@@ -1273,18 +1177,13 @@ static int arm_smmu_attach_dev(struct iommu_domain *domain, struct device *dev)
return -EINVAL;
}
- /* Looks ok, so add the device to the domain */
- cfg = find_smmu_master_cfg(dev);
- if (!cfg)
- return -ENODEV;
-
/* Detach the dev from its current domain */
- if (dev->archdata.iommu)
- arm_smmu_detach_dev(dev, cfg);
+ if (cfg->smmu_domain)
+ arm_smmu_domain_remove_master(cfg);
ret = arm_smmu_domain_add_master(smmu_domain, cfg);
if (!ret)
- dev->archdata.iommu = domain;
+ cfg->smmu_domain = smmu_domain;
return ret;
}
@@ -1404,75 +1303,74 @@ static bool arm_smmu_capable(enum iommu_cap cap)
}
}
-static int __arm_smmu_get_pci_sid(struct pci_dev *pdev, u16 alias, void *data)
-{
- *((u16 *)data) = alias;
- return 0; /* Continue walking */
-}
-
-static void __arm_smmu_release_pci_iommudata(void *data)
+static int arm_smmu_add_dev_streamid(struct arm_smmu_device *smmu,
+ struct device *dev, u16 sid)
{
- kfree(data);
-}
-
-static int arm_smmu_init_pci_device(struct pci_dev *pdev,
- struct iommu_group *group)
-{
- struct arm_smmu_master_cfg *cfg;
- u16 sid;
+ struct arm_smmu_master_cfg *cfg = dev->archdata.iommu;
int i;
- cfg = iommu_group_get_iommudata(group);
if (!cfg) {
cfg = kzalloc(sizeof(*cfg), GFP_KERNEL);
if (!cfg)
return -ENOMEM;
- iommu_group_set_iommudata(group, cfg,
- __arm_smmu_release_pci_iommudata);
+ cfg->smmu = smmu;
+ dev->archdata.iommu = cfg;
}
if (cfg->num_streamids >= MAX_MASTER_STREAMIDS)
return -ENOSPC;
- /*
- * Assume Stream ID == Requester ID for now.
- * We need a way to describe the ID mappings in FDT.
- */
- pci_for_each_dma_alias(pdev, __arm_smmu_get_pci_sid, &sid);
+ /* Avoid duplicate SIDs, as this can lead to SMR conflicts */
for (i = 0; i < cfg->num_streamids; ++i)
- if (cfg->streamids[i] == sid)
- break;
+ if (cfg->streamids[i].id == sid) {
+ dev_warn(dev, "Stream ID 0x%hx repeated; ignoring\n",
+ sid);
+ return 0;
+ }
- /* Avoid duplicate SIDs, as this can lead to SMR conflicts */
- if (i == cfg->num_streamids)
- cfg->streamids[cfg->num_streamids++] = sid;
+ cfg->streamids[cfg->num_streamids++].id = sid;
return 0;
}
-static int arm_smmu_init_platform_device(struct device *dev,
- struct iommu_group *group)
+static int __arm_smmu_get_pci_sid(struct pci_dev *pdev, u16 alias, void *data)
{
- struct arm_smmu_device *smmu = find_smmu_for_device(dev);
- struct arm_smmu_master *master;
+ *((u16 *)data) = alias;
+ return 0; /* Continue walking */
+}
- if (!smmu)
- return -ENODEV;
+static int arm_smmu_init_legacy_master(struct device *dev)
+{
+ struct arm_smmu_master *master;
+ struct device_node *np = dev_get_dev_node(dev);
+ u16 sid;
- master = find_smmu_master(smmu, dev->of_node);
+ master = find_smmu_master(np);
if (!master)
return -ENODEV;
- iommu_group_set_iommudata(group, &master->cfg, NULL);
+ if (!dev_is_pci(dev)) {
+ dev->archdata.iommu = &master->cfg;
+ return 0;
+ }
- return 0;
+ /* Legacy bindings assume Stream ID == Requester ID */
+ pci_for_each_dma_alias(to_pci_dev(dev), __arm_smmu_get_pci_sid, &sid);
+ return arm_smmu_add_dev_streamid(master->cfg.smmu, dev, sid);
}
static int arm_smmu_add_device(struct device *dev)
{
struct iommu_group *group;
+ if (!dev->archdata.iommu) {
+ int ret = arm_smmu_init_legacy_master(dev);
+
+ if (ret)
+ return ret;
+ }
+
group = iommu_group_get_for_dev(dev);
if (IS_ERR(group))
return PTR_ERR(group);
@@ -1484,29 +1382,113 @@ static int arm_smmu_add_device(struct device *dev)
static void arm_smmu_remove_device(struct device *dev)
{
iommu_group_remove_device(dev);
+ if (!find_smmu_master(dev->of_node))
+ kfree(dev->archdata.iommu);
+}
+
+static void __arm_smmu_release_iommudata(void *data)
+{
+ kfree(data);
+}
+
+static struct iommu_group *arm_smmu_group_lookup(struct device *dev)
+{
+ struct arm_smmu_master_cfg *cfg = dev->archdata.iommu;
+ struct arm_smmu_device *smmu = cfg->smmu;
+ struct iommu_group **lut = smmu->group_lut;
+ struct iommu_group *group = NULL;
+ int i;
+
+ if (!lut) {
+ /*
+ * Unfortunately this has to be sized for the worst-case until
+ * we get even cleverer with stream ID management.
+ */
+ lut = devm_kcalloc(smmu->dev, SMR_ID_MASK + 1,
+ sizeof(*lut), GFP_KERNEL);
+ if (lut)
+ smmu->group_lut = lut;
+ else
+ group = ERR_PTR(-ENOMEM);
+ } else {
+ /* Check for platform or cross-bus aliases */
+ for (i = 0; i < cfg->num_streamids; i++) {
+ struct iommu_group *tmp = lut[cfg->streamids[i].id];
+
+ if (!tmp)
+ continue;
+
+ if (group && tmp != group) {
+ dev_err(smmu->dev,
+ "Cannot handle master %s aliasing multiple groups\n",
+ dev_name(dev));
+ return ERR_PTR(-EBUSY);
+ }
+ group = tmp;
+ }
+ }
+ return group;
+}
+
+static inline bool __streamid_match_sme(struct arm_smmu_streamid *sid,
+ struct arm_smmu_stream_map_entry *sme)
+{
+ /* This will get rather more complicated with masking... */
+ return sid->id == sme->id;
}
static struct iommu_group *arm_smmu_device_group(struct device *dev)
{
+ struct arm_smmu_master_cfg *master_cfg = dev->archdata.iommu;
+ struct arm_smmu_group_cfg *group_cfg;
+ struct arm_smmu_streamid *sids;
+ struct arm_smmu_stream_map_entry *smes;
struct iommu_group *group;
- int ret;
+ int i, j;
- if (dev_is_pci(dev))
- group = pci_device_group(dev);
- else
- group = generic_device_group(dev);
+ group = arm_smmu_group_lookup(dev);
+ if (!group) {
+ if (dev_is_pci(dev))
+ group = pci_device_group(dev);
+ else
+ group = generic_device_group(dev);
+ }
if (IS_ERR(group))
return group;
- if (dev_is_pci(dev))
- ret = arm_smmu_init_pci_device(to_pci_dev(dev), group);
- else
- ret = arm_smmu_init_platform_device(dev, group);
+ group_cfg = iommu_group_get_iommudata(group);
+ if (!group_cfg) {
+ group_cfg = kzalloc(sizeof(*group_cfg), GFP_KERNEL);
+ if (!group_cfg)
+ return ERR_PTR(-ENOMEM);
+
+ iommu_group_set_iommudata(group, group_cfg,
+ __arm_smmu_release_iommudata);
+ }
- if (ret) {
- iommu_group_put(group);
- group = ERR_PTR(ret);
+ /* Propagate device's IDs to the group, avoiding duplicate entries */
+ sids = master_cfg->streamids;
+ smes = group_cfg->smes;
+ for (i = 0; i < master_cfg->num_streamids; i++) {
+ master_cfg->smmu->group_lut[sids[i].id] = group;
+
+ for (j = 0; j < group_cfg->num_smes; j++) {
+ if (__streamid_match_sme(&sids[i], &smes[j])) {
+ sids[i].s2cr_idx = STREAM_MULTIPLE;
+ break;
+ }
+ }
+
+ if (j < group_cfg->num_smes)
+ continue;
+
+ if (group_cfg->num_smes == MAX_MASTER_STREAMIDS) {
+ iommu_group_put(group);
+ return ERR_PTR(-ENOSPC);
+ }
+
+ smes[group_cfg->num_smes++].id = sids[i].id;
}
return group;
@@ -1556,6 +1538,20 @@ out_unlock:
return ret;
}
+static int arm_smmu_of_xlate(struct device *dev, struct of_phandle_args *args)
+{
+ struct arm_smmu_device *smmu;
+ struct platform_device *smmu_pdev;
+
+ smmu_pdev = of_find_device_by_node(args->np);
+ if (!smmu_pdev)
+ return -ENODEV;
+
+ smmu = platform_get_drvdata(smmu_pdev);
+
+ return arm_smmu_add_dev_streamid(smmu, dev, args->args[0]);
+}
+
static struct iommu_ops arm_smmu_ops = {
.capable = arm_smmu_capable,
.domain_alloc = arm_smmu_domain_alloc,
@@ -1570,6 +1566,7 @@ static struct iommu_ops arm_smmu_ops = {
.device_group = arm_smmu_device_group,
.domain_get_attr = arm_smmu_domain_get_attr,
.domain_set_attr = arm_smmu_domain_set_attr,
+ .of_xlate = arm_smmu_of_xlate,
.pgsize_bitmap = -1UL, /* Restricted during device attach */
};
@@ -1584,11 +1581,13 @@ static void arm_smmu_device_reset(struct arm_smmu_device *smmu)
reg = readl_relaxed(ARM_SMMU_GR0_NS(smmu) + ARM_SMMU_GR0_sGFSR);
writel(reg, ARM_SMMU_GR0_NS(smmu) + ARM_SMMU_GR0_sGFSR);
- /* Mark all SMRn as invalid and all S2CRn as bypass unless overridden */
- reg = disable_bypass ? S2CR_TYPE_FAULT : S2CR_TYPE_BYPASS;
- for (i = 0; i < smmu->num_mapping_groups; ++i) {
- writel_relaxed(0, gr0_base + ARM_SMMU_GR0_SMR(i));
- writel_relaxed(reg, gr0_base + ARM_SMMU_GR0_S2CR(i));
+ if (!(smmu->options & ARM_SMMU_OPT_SKIP_INIT)) {
+ /* Mark all SMRn as invalid and all S2CRn as bypass unless overridden */
+ reg = disable_bypass ? S2CR_TYPE_FAULT : S2CR_TYPE_BYPASS;
+ for (i = 0; i < smmu->num_mapping_groups; ++i) {
+ writel_relaxed(0, gr0_base + ARM_SMMU_GR0_SMR(i));
+ writel_relaxed(reg, gr0_base + ARM_SMMU_GR0_S2CR(i));
+ }
}
/*
@@ -1604,19 +1603,21 @@ static void arm_smmu_device_reset(struct arm_smmu_device *smmu)
writel_relaxed(reg, gr0_base + ARM_SMMU_GR0_sACR);
}
- /* Make sure all context banks are disabled and clear CB_FSR */
- for (i = 0; i < smmu->num_context_banks; ++i) {
- cb_base = ARM_SMMU_CB_BASE(smmu) + ARM_SMMU_CB(smmu, i);
- writel_relaxed(0, cb_base + ARM_SMMU_CB_SCTLR);
- writel_relaxed(FSR_FAULT, cb_base + ARM_SMMU_CB_FSR);
- /*
- * Disable MMU-500's not-particularly-beneficial next-page
- * prefetcher for the sake of errata #841119 and #826419.
- */
- if (smmu->model == ARM_MMU500) {
- reg = readl_relaxed(cb_base + ARM_SMMU_CB_ACTLR);
- reg &= ~ARM_MMU500_ACTLR_CPRE;
- writel_relaxed(reg, cb_base + ARM_SMMU_CB_ACTLR);
+ if (!(smmu->options & ARM_SMMU_OPT_SKIP_INIT)) {
+ /* Make sure all context banks are disabled and clear CB_FSR */
+ for (i = 0; i < smmu->num_context_banks; ++i) {
+ cb_base = ARM_SMMU_CB_BASE(smmu) + ARM_SMMU_CB(smmu, i);
+ writel_relaxed(0, cb_base + ARM_SMMU_CB_SCTLR);
+ writel_relaxed(FSR_FAULT, cb_base + ARM_SMMU_CB_FSR);
+ /*
+ * Disable MMU-500's not-particularly-beneficial next-page
+ * prefetcher for the sake of errata #841119 and #826419.
+ */
+ if (smmu->model == ARM_MMU500) {
+ reg = readl_relaxed(cb_base + ARM_SMMU_CB_ACTLR);
+ reg &= ~ARM_MMU500_ACTLR_CPRE;
+ writel_relaxed(reg, cb_base + ARM_SMMU_CB_ACTLR);
+ }
}
}
@@ -1792,8 +1793,6 @@ static int arm_smmu_device_cfg_probe(struct arm_smmu_device *smmu)
"\t(IDR0.CTTW overridden by dma-coherent property)\n");
if (id & ID0_SMS) {
- u32 smr, sid, mask;
-
smmu->features |= ARM_SMMU_FEAT_STREAM_MATCH;
smmu->num_mapping_groups = (id >> ID0_NUMSMRG_SHIFT) &
ID0_NUMSMRG_MASK;
@@ -1803,23 +1802,27 @@ static int arm_smmu_device_cfg_probe(struct arm_smmu_device *smmu)
return -ENODEV;
}
- smr = SMR_MASK_MASK << SMR_MASK_SHIFT;
- smr |= (SMR_ID_MASK << SMR_ID_SHIFT);
- writel_relaxed(smr, gr0_base + ARM_SMMU_GR0_SMR(0));
- smr = readl_relaxed(gr0_base + ARM_SMMU_GR0_SMR(0));
-
- mask = (smr >> SMR_MASK_SHIFT) & SMR_MASK_MASK;
- sid = (smr >> SMR_ID_SHIFT) & SMR_ID_MASK;
- if ((mask & sid) != sid) {
- dev_err(smmu->dev,
- "SMR mask bits (0x%x) insufficient for ID field (0x%x)\n",
- mask, sid);
- return -ENODEV;
+ if (!(smmu->options & ARM_SMMU_OPT_NO_SMR_CHECK)) {
+ u32 smr, sid, mask;
+
+ smr = SMR_MASK_MASK << SMR_MASK_SHIFT;
+ smr |= (SMR_ID_MASK << SMR_ID_SHIFT);
+ writel_relaxed(smr, gr0_base + ARM_SMMU_GR0_SMR(0));
+ smr = readl_relaxed(gr0_base + ARM_SMMU_GR0_SMR(0));
+
+ mask = (smr >> SMR_MASK_SHIFT) & SMR_MASK_MASK;
+ sid = (smr >> SMR_ID_SHIFT) & SMR_ID_MASK;
+ if ((mask & sid) != sid) {
+ dev_err(smmu->dev,
+ "SMR mask bits (0x%x) insufficient for ID field (0x%x)\n",
+ mask, sid);
+ return -ENODEV;
+ }
+
+ dev_notice(smmu->dev,
+ "\tstream matching with %u register groups, mask 0x%x",
+ smmu->num_mapping_groups, mask);
}
-
- dev_notice(smmu->dev,
- "\tstream matching with %u register groups, mask 0x%x",
- smmu->num_mapping_groups, mask);
} else {
smmu->num_mapping_groups = (id >> ID0_NUMSIDB_SHIFT) &
ID0_NUMSIDB_MASK;
@@ -1954,6 +1957,117 @@ static const struct of_device_id arm_smmu_of_match[] = {
};
MODULE_DEVICE_TABLE(of, arm_smmu_of_match);
+static void arm_smmu_remove_mmu_masters(struct arm_smmu_device *smmu)
+{
+ struct arm_smmu_master *master, *tmp;
+
+ write_lock(&arm_smmu_masters_lock);
+ list_for_each_entry_safe(master, tmp, &arm_smmu_masters, list) {
+ if (master->cfg.smmu != smmu)
+ continue;
+
+ list_del(&master->list);
+ of_node_put(master->of_node);
+ devm_kfree(smmu->dev, master);
+ }
+ write_unlock(&arm_smmu_masters_lock);
+}
+
+static int insert_smmu_master(struct arm_smmu_master *master)
+{
+ struct arm_smmu_master *tmp;
+ int ret = -EEXIST;
+
+ write_lock(&arm_smmu_masters_lock);
+ list_for_each_entry(tmp, &arm_smmu_masters, list)
+ if (tmp->of_node == master->of_node)
+ goto out_unlock;
+
+ ret = 0;
+ list_add(&master->list, &arm_smmu_masters);
+out_unlock:
+ write_unlock(&arm_smmu_masters_lock);
+ return ret;
+}
+
+static int register_smmu_master(struct arm_smmu_device *smmu,
+ struct arm_smmu_phandle_args *masterspec)
+{
+ int i;
+ struct arm_smmu_master *master;
+
+ master = find_smmu_master(masterspec->np);
+ if (master) {
+ dev_err(smmu->dev,
+ "rejecting multiple registrations for master device %s\n",
+ masterspec->np->name);
+ return -EBUSY;
+ }
+
+ if (masterspec->args_count > MAX_MASTER_STREAMIDS) {
+ dev_err(smmu->dev,
+ "reached maximum number (%d) of stream IDs for master device %s\n",
+ MAX_MASTER_STREAMIDS, masterspec->np->name);
+ return -ENOSPC;
+ }
+
+ master = devm_kzalloc(smmu->dev, sizeof(*master), GFP_KERNEL);
+ if (!master)
+ return -ENOMEM;
+
+ master->of_node = masterspec->np;
+ master->cfg.num_streamids = masterspec->args_count;
+
+ for (i = 0; i < master->cfg.num_streamids; ++i) {
+ u16 streamid = masterspec->args[i];
+
+ if (!(smmu->features & ARM_SMMU_FEAT_STREAM_MATCH) &&
+ (streamid >= smmu->num_mapping_groups)) {
+ dev_err(smmu->dev,
+ "stream ID for master device %s greater than maximum allowed (%d)\n",
+ masterspec->np->name, smmu->num_mapping_groups);
+ return -ERANGE;
+ }
+ master->cfg.streamids[i].id = streamid;
+ /* leave .mask 0; we don't currently share SMRs */
+ }
+ return insert_smmu_master(master);
+}
+
+static int arm_smmu_probe_mmu_masters(struct arm_smmu_device *smmu)
+{
+ struct of_phandle_iterator it;
+ struct arm_smmu_phandle_args *masterspec;
+ int err = 0, i = 0;
+
+ dev_notice(smmu->dev,
+ "Deprecated \"mmu-masters\" property found; update DT to \"iommus\" property if possible\n");
+
+ /* No need to zero the memory for masterspec */
+ masterspec = kmalloc(sizeof(*masterspec), GFP_KERNEL);
+ if (!masterspec)
+ return -ENOMEM;
+
+ of_for_each_phandle(&it, err, smmu->dev->of_node,
+ "mmu-masters", "#stream-id-cells", 0) {
+ int count = of_phandle_iterator_args(&it, masterspec->args,
+ MAX_MASTER_STREAMIDS);
+ masterspec->np = of_node_get(it.node);
+ masterspec->args_count = count;
+
+ err = register_smmu_master(smmu, masterspec);
+
+ if (err) {
+ dev_err(smmu->dev, "failed to register mmu-masters\n");
+ arm_smmu_remove_mmu_masters(smmu);
+ } else if (i) {
+ dev_info(smmu->dev, "registered %d mmu-masters\n", i);
+ }
+ }
+
+ return err;
+}
+
static int arm_smmu_device_dt_probe(struct platform_device *pdev)
{
const struct of_device_id *of_id;
@@ -1961,9 +2075,6 @@ static int arm_smmu_device_dt_probe(struct platform_device *pdev)
struct resource *res;
struct arm_smmu_device *smmu;
struct device *dev = &pdev->dev;
- struct rb_node *node;
- struct of_phandle_iterator it;
- struct arm_smmu_phandle_args *masterspec;
int num_irqs, i, err;
smmu = devm_kzalloc(dev, sizeof(*smmu), GFP_KERNEL);
@@ -2020,66 +2131,41 @@ static int arm_smmu_device_dt_probe(struct platform_device *pdev)
smmu->irqs[i] = irq;
}
+ pm_runtime_enable(dev);
+
+ err = pm_runtime_get_sync(dev);
+ if (err)
+ goto err_pm_runtime;
+
err = arm_smmu_init_regulators(smmu);
if (err)
- goto out;
+ goto err_pm_runtime;
err = arm_smmu_init_clocks(smmu);
if (err)
- goto out;
+ goto err_pm_runtime;
err = arm_smmu_enable_regulators(smmu);
if (err)
- goto out;
+ goto err_pm_runtime;
err = arm_smmu_enable_clocks(smmu);
if (err)
goto out_disable_regulators;
+ parse_driver_options(smmu);
+
err = arm_smmu_device_cfg_probe(smmu);
if (err)
goto out_disable_clocks;
- i = 0;
- smmu->masters = RB_ROOT;
-
- err = -ENOMEM;
- /* No need to zero the memory for masterspec */
- masterspec = kmalloc(sizeof(*masterspec), GFP_KERNEL);
- if (!masterspec)
- goto out_put_masters;
-
- of_for_each_phandle(&it, err, dev->of_node,
- "mmu-masters", "#stream-id-cells", 0) {
- int count = of_phandle_iterator_args(&it, masterspec->args,
- MAX_MASTER_STREAMIDS);
- masterspec->np = of_node_get(it.node);
- masterspec->args_count = count;
-
- err = register_smmu_master(smmu, dev, masterspec);
- if (err) {
- dev_err(dev, "failed to add master %s\n",
- masterspec->np->name);
- kfree(masterspec);
- goto out_put_masters;
- }
-
- i++;
- }
-
- dev_notice(dev, "registered %d master devices\n", i);
-
- kfree(masterspec);
-
- parse_driver_options(smmu);
-
if (smmu->version == ARM_SMMU_V2 &&
smmu->num_context_banks != smmu->num_context_irqs) {
dev_err(dev,
- "found only %d context interrupt(s) but %d required\n",
- smmu->num_context_irqs, smmu->num_context_banks);
- err = -ENODEV;
- goto out_put_masters;
+ "found %d context interrupt(s) but have %d context banks. assuming %d context interrupts.\n",
+ smmu->num_context_irqs, smmu->num_context_banks,
+ smmu->num_context_banks);
+ smmu->num_context_irqs = smmu->num_context_banks;
}
for (i = 0; i < smmu->num_global_irqs; ++i) {
@@ -2091,24 +2177,22 @@ static int arm_smmu_device_dt_probe(struct platform_device *pdev)
if (err) {
dev_err(dev, "failed to request global IRQ %d (%u)\n",
i, smmu->irqs[i]);
- goto out_put_masters;
+ goto out_free_irqs;
}
}
- INIT_LIST_HEAD(&smmu->list);
- spin_lock(&arm_smmu_devices_lock);
- list_add(&smmu->list, &arm_smmu_devices);
- spin_unlock(&arm_smmu_devices_lock);
-
+ platform_set_drvdata(pdev, smmu);
+ /* Check first to avoid of_parse_phandle_with_args complaining */
+ if (of_property_read_bool(dev->of_node, "mmu-masters"))
+ arm_smmu_probe_mmu_masters(smmu);
arm_smmu_device_reset(smmu);
+ of_iommu_set_ops(dev->of_node, &arm_smmu_ops);
return 0;
-out_put_masters:
- for (node = rb_first(&smmu->masters); node; node = rb_next(node)) {
- struct arm_smmu_master *master
- = container_of(node, struct arm_smmu_master, node);
- of_node_put(master->of_node);
- }
+
+err_pm_runtime:
+ pm_runtime_set_suspended(dev);
+ pm_runtime_disable(dev);
out_free_irqs:
while (i--)
@@ -2126,32 +2210,16 @@ out:
static int arm_smmu_device_remove(struct platform_device *pdev)
{
+ struct arm_smmu_device *smmu = platform_get_drvdata(pdev);
int i;
- struct device *dev = &pdev->dev;
- struct arm_smmu_device *curr, *smmu = NULL;
- struct rb_node *node;
-
- spin_lock(&arm_smmu_devices_lock);
- list_for_each_entry(curr, &arm_smmu_devices, list) {
- if (curr->dev == dev) {
- smmu = curr;
- list_del(&smmu->list);
- break;
- }
- }
- spin_unlock(&arm_smmu_devices_lock);
if (!smmu)
return -ENODEV;
- for (node = rb_first(&smmu->masters); node; node = rb_next(node)) {
- struct arm_smmu_master *master
- = container_of(node, struct arm_smmu_master, node);
- of_node_put(master->of_node);
- }
+ arm_smmu_remove_mmu_masters(smmu);
if (!bitmap_empty(smmu->context_map, ARM_SMMU_MAX_CBS))
- dev_err(dev, "removing device with active domains!\n");
+ dev_err(&pdev->dev, "removing device with active domains!\n");
for (i = 0; i < smmu->num_global_irqs; ++i)
devm_free_irq(smmu->dev, smmu->irqs[i], smmu);
@@ -2160,6 +2228,8 @@ static int arm_smmu_device_remove(struct platform_device *pdev)
writel(sCR0_CLIENTPD, ARM_SMMU_GR0_NS(smmu) + ARM_SMMU_GR0_sCR0);
arm_smmu_disable_clocks(smmu);
arm_smmu_disable_regulators(smmu);
+ pm_runtime_set_suspended(&pdev->dev);
+ pm_runtime_disable(&pdev->dev);
return 0;
}
@@ -2176,8 +2246,11 @@ static struct platform_driver arm_smmu_driver = {
static int __init arm_smmu_init(void)
{
struct device_node *np;
+ static bool done;
int ret;
+ if (done)
+ return 0;
/*
* Play nice with systems that don't have an ARM SMMU by checking that
* an ARM SMMU exists in the system before proceeding with the driver
@@ -2209,6 +2282,7 @@ static int __init arm_smmu_init(void)
}
#endif
+ done = true;
return 0;
}
@@ -2220,6 +2294,16 @@ static void __exit arm_smmu_exit(void)
subsys_initcall(arm_smmu_init);
module_exit(arm_smmu_exit);
+static int __init arm_smmu_of_init(struct device_node *np)
+{
+ return arm_smmu_init();
+}
+IOMMU_OF_DECLARE(arm_smmuv1, "arm,smmu-v1", arm_smmu_of_init);
+IOMMU_OF_DECLARE(arm_smmuv2, "arm,smmu-v2", arm_smmu_of_init);
+IOMMU_OF_DECLARE(arm_mmu400, "arm,mmu-400", arm_smmu_of_init);
+IOMMU_OF_DECLARE(arm_mmu401, "arm,mmu-401", arm_smmu_of_init);
+IOMMU_OF_DECLARE(arm_mmu500, "arm,mmu-500", arm_smmu_of_init);
+
MODULE_DESCRIPTION("IOMMU API for ARM architected SMMU implementations");
MODULE_AUTHOR("Will Deacon <will.deacon@arm.com>");
MODULE_LICENSE("GPL v2");
diff --git a/drivers/iommu/dma-iommu.c b/drivers/iommu/dma-iommu.c
index 00c8a08d56e72..227418127d8f2 100644
--- a/drivers/iommu/dma-iommu.c
+++ b/drivers/iommu/dma-iommu.c
@@ -503,6 +503,10 @@ int iommu_dma_map_sg(struct device *dev, struct scatterlist *sg,
unsigned long mask = dma_get_seg_boundary(dev);
int i;
+ if (!iovad) {
+ iommu_get_dma_cookie(domain);
+ iovad = domain->iova_cookie;
+ }
/*
* Work out how much IOVA space we need, and align the segments to
* IOVA granules for the IOMMU driver to handle. With some clever
diff --git a/drivers/iommu/iommu.c b/drivers/iommu/iommu.c
index b06d935944369..f359254ca6dab 100644
--- a/drivers/iommu/iommu.c
+++ b/drivers/iommu/iommu.c
@@ -888,6 +888,18 @@ static int remove_iommu_group(struct device *dev, void *data)
return 0;
}
+int iommu_bus_add_dev(struct device *dev)
+{
+ const struct iommu_ops *ops = dev->bus->iommu_ops;
+ int ret = -ENODEV;
+
+ if (ops->add_device)
+ ret = ops->add_device(dev);
+
+ return ret;
+}
+EXPORT_SYMBOL_GPL(iommu_bus_add_dev);
+
static int iommu_bus_notifier(struct notifier_block *nb,
unsigned long action, void *data)
{
diff --git a/drivers/iommu/msm_iommu.c b/drivers/iommu/msm_iommu.c
index b09692bb5b0a2..1110b72f5df53 100644
--- a/drivers/iommu/msm_iommu.c
+++ b/drivers/iommu/msm_iommu.c
@@ -411,6 +411,7 @@ static int msm_iommu_attach_dev(struct iommu_domain *domain, struct device *dev)
}
__disable_clocks(iommu);
list_add(&iommu->dom_node, &priv->list_attached);
+ iommu->domain = domain;
}
}
@@ -614,8 +615,8 @@ irqreturn_t msm_iommu_fault_handler(int irq, void *dev_id)
goto fail;
}
- pr_err("Unexpected IOMMU page fault!\n");
- pr_err("base = %08x\n", (unsigned int)iommu->base);
+ pr_debug("Unexpected IOMMU page fault!\n");
+ pr_debug("base = %08x\n", (unsigned int)iommu->base);
ret = __enable_clocks(iommu);
if (ret)
@@ -624,10 +625,16 @@ irqreturn_t msm_iommu_fault_handler(int irq, void *dev_id)
for (i = 0; i < iommu->ncb; i++) {
fsr = GET_FSR(iommu->base, i);
if (fsr) {
- pr_err("Fault occurred in context %d.\n", i);
- pr_err("Interesting registers:\n");
- print_ctx_regs(iommu->base, i);
+ int ret = report_iommu_fault(iommu->domain,
+ to_msm_priv(iommu->domain)->dev,
+ GET_FAR(iommu->base, i), 0);
+ if (ret == -ENOSYS) {
+ pr_err("Fault occurred in context %d.\n", i);
+ pr_err("Interesting registers:\n");
+ print_ctx_regs(iommu->base, i);
+ }
SET_FSR(iommu->base, i, 0x4000000F);
+ SET_RESUME(iommu->base, i, 1);
}
}
__disable_clocks(iommu);
diff --git a/drivers/iommu/msm_iommu.h b/drivers/iommu/msm_iommu.h
index 4ca25d50d6796..c53016c830370 100644
--- a/drivers/iommu/msm_iommu.h
+++ b/drivers/iommu/msm_iommu.h
@@ -56,6 +56,8 @@
* dom_node: list head for domain
* ctx_list: list of 'struct msm_iommu_ctx_dev'
* context_map: Bitmap to track allocated context banks
+ * domain: iommu domain that this iommu dev is a member of,
+ * ie. whose msm_priv::list_attached are we on?
*/
struct msm_iommu_dev {
void __iomem *base;
@@ -68,6 +70,7 @@ struct msm_iommu_dev {
struct list_head dom_node;
struct list_head ctx_list;
DECLARE_BITMAP(context_map, IOMMU_MAX_CBS);
+ struct iommu_domain *domain;
};
/**
diff --git a/drivers/iommu/of_iommu.c b/drivers/iommu/of_iommu.c
index 57f23eaaa2f9a..0132a2c1b73c3 100644
--- a/drivers/iommu/of_iommu.c
+++ b/drivers/iommu/of_iommu.c
@@ -22,6 +22,7 @@
#include <linux/limits.h>
#include <linux/of.h>
#include <linux/of_iommu.h>
+#include <linux/of_pci.h>
#include <linux/slab.h>
static const struct of_device_id __iommu_of_table_sentinel
@@ -134,20 +135,48 @@ const struct iommu_ops *of_iommu_get_ops(struct device_node *np)
return ops;
}
+static int __get_pci_rid(struct pci_dev *pdev, u16 alias, void *data)
+{
+ struct of_phandle_args *iommu_spec = data;
+ struct device_node *np = pdev->bus->dev.of_node;
+
+ iommu_spec->args[0] = alias;
+ return np == iommu_spec->np;
+}
+
const struct iommu_ops *of_iommu_configure(struct device *dev,
struct device_node *master_np)
{
struct of_phandle_args iommu_spec;
- struct device_node *np;
+ struct device_node *np = NULL;
const struct iommu_ops *ops = NULL;
int idx = 0;
- /*
- * We can't do much for PCI devices without knowing how
- * device IDs are wired up from the PCI bus to the IOMMU.
- */
- if (dev_is_pci(dev))
- return NULL;
+ if (dev_is_pci(dev)) {
+ /*
+ * Start by tracing the RID alias down the PCI topology as
+ * far as the host bridge whose OF node we have...
+ */
+ iommu_spec.np = master_np;
+ pci_for_each_dma_alias(to_pci_dev(dev), __get_pci_rid,
+ &iommu_spec);
+ /*
+ * ...then find out what that becomes once it escapes the PCI
+ * bus into the system beyond, and which IOMMU it ends up at.
+ */
+ if (of_pci_map_rid(master_np, "iommu-map", iommu_spec.args[0],
+ &np, iommu_spec.args))
+ return NULL;
+
+ iommu_spec.np = np;
+ iommu_spec.args_count = 1;
+ ops = of_iommu_get_ops(np);
+ if (!ops || !ops->of_xlate || ops->of_xlate(dev, &iommu_spec))
+ goto err_put_node;
+
+ of_node_put(np);
+ return ops;
+ }
/*
* We don't currently walk up the tree looking for a parent IOMMU.
@@ -160,8 +189,18 @@ const struct iommu_ops *of_iommu_configure(struct device *dev,
np = iommu_spec.np;
ops = of_iommu_get_ops(np);
- if (!ops || !ops->of_xlate || ops->of_xlate(dev, &iommu_spec))
+ if (!ops) {
+ const struct of_device_id *oid;
+
+ oid = of_match_node(&__iommu_of_table, np);
+ ops = oid ? ERR_PTR(-EPROBE_DEFER) : NULL;
goto err_put_node;
+ }
+
+ if (!ops->of_xlate || ops->of_xlate(dev, &iommu_spec)) {
+ ops = NULL;
+ goto err_put_node;
+ }
of_node_put(np);
idx++;
@@ -171,7 +210,7 @@ const struct iommu_ops *of_iommu_configure(struct device *dev,
err_put_node:
of_node_put(np);
- return NULL;
+ return ops;
}
static int __init of_iommu_init(void)
@@ -182,7 +221,7 @@ static int __init of_iommu_init(void)
for_each_matching_node_and_match(np, matches, &match) {
const of_iommu_init_fn init_fn = match->data;
- if (init_fn(np))
+ if (init_fn && init_fn(np))
pr_err("Failed to initialise IOMMU %s\n",
of_node_full_name(np));
}
diff --git a/drivers/iommu/qcom/Kconfig b/drivers/iommu/qcom/Kconfig
new file mode 100644
index 0000000000000..b507e575cb958
--- /dev/null
+++ b/drivers/iommu/qcom/Kconfig
@@ -0,0 +1,44 @@
+# Qualcomm IOMMU support
+
+# QCOM IOMMUv1 support
+config QCOM_IOMMU_V1
+ bool "Qualcomm IOMMUv1 Support"
+ depends on ARCH_QCOM
+ select IOMMU_API
+ select QCOM_SCM
+ select ARM_DMA_USE_IOMMU if ARM
+ select ARM64_DMA_USE_IOMMU if ARM64
+ help
+ Support for the IOMMUs (v1) found on certain Qualcomm SOCs.
+ These IOMMUs allow virtualization of the address space used by most
+ cores within the multimedia subsystem.
+
+ If unsure, say N here.
+
+config MMU500_ACTIVE_PREFETCH_BUG_WITH_SECTION_MAPPING
+ bool "Don't align virtual address at 1MB boundary"
+ depends on QCOM_IOMMU_V1
+ help
+ Say Y here if the MMU500 revision has a bug in active prefetch
+ which can cause TLB corruptions due to 1MB alignment of a buffer.
+ Here is the sequence which will surface this BUG.
+ 1) Create a 2-level mapping in v7S format for 1MB buffer. Start of
+ the buffer should be at even MB boundary.
+ 2) Create a section mapping for 1MB buffer adjacent to previous
+ mapping in step 1.
+ 3) Access last page from 2 level mapping followed by an access into
+ section mapped area.
+ 4) Step 3 will result into TLB corruption and this corruption can
+ lead to any misbehavior (like Permission fault) for sub-sequent
+ transactions.
+
+ If unsure, say Y here if IOMMU mapping will not exhaust the VA space.
+
+config IOMMU_PGTABLES_L2
+ bool "Allow SMMU page tables in the L2 cache (Experimental)"
+ depends on QCOM_IOMMU_V1 && MMU && SMP && CPU_DCACHE_DISABLE=n
+ help
+ Improves TLB miss latency at the expense of potential L2 pollution.
+ However, with large multimedia buffers, the TLB should mostly contain
+ section mappings and TLB misses should be quite infrequent.
+ Most people can probably say Y here.
diff --git a/drivers/iommu/qcom/Makefile b/drivers/iommu/qcom/Makefile
new file mode 100644
index 0000000000000..e0b1159227be4
--- /dev/null
+++ b/drivers/iommu/qcom/Makefile
@@ -0,0 +1,7 @@
+obj-$(CONFIG_QCOM_IOMMU_V1) += qcom-iommu.o
+
+qcom-iommu-y += msm_iommu.o
+qcom-iommu-y += msm_iommu-v1.o
+qcom-iommu-y += msm_iommu_dev-v1.o
+qcom-iommu-y += msm_iommu_sec.o
+qcom-iommu-y += msm_iommu_pagetable.o
diff --git a/drivers/iommu/qcom/msm_iommu-v1.c b/drivers/iommu/qcom/msm_iommu-v1.c
new file mode 100644
index 0000000000000..d7e7b4b706ac8
--- /dev/null
+++ b/drivers/iommu/qcom/msm_iommu-v1.c
@@ -0,0 +1,1540 @@
+/* Copyright (c) 2012-2015, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+#include <linux/delay.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/errno.h>
+#include <linux/io.h>
+#include <linux/interrupt.h>
+#include <linux/list.h>
+#include <linux/mutex.h>
+#include <linux/slab.h>
+#include <linux/iommu.h>
+#ifdef CONFIG_MSM_BUS_SCALING
+#include <linux/msm-bus.h>
+#endif
+#include <linux/clk.h>
+#include <linux/scatterlist.h>
+#include <linux/of.h>
+#include <linux/of_device.h>
+#include <linux/regulator/consumer.h>
+#include <linux/notifier.h>
+#include <linux/iopoll.h>
+#include <linux/qcom_iommu.h>
+#include <asm/sizes.h>
+
+#include "msm_iommu_hw-v1.h"
+#include "msm_iommu_priv.h"
+#include "msm_iommu_perfmon.h"
+#include "msm_iommu_pagetable.h"
+
+#ifdef CONFIG_IOMMU_LPAE
+/* bitmap of the page sizes currently supported */
+#define MSM_IOMMU_PGSIZES (SZ_4K | SZ_64K | SZ_2M | SZ_32M | SZ_1G)
+#else
+/* bitmap of the page sizes currently supported */
+#define MSM_IOMMU_PGSIZES (SZ_4K | SZ_64K | SZ_1M | SZ_16M)
+#endif
+
+#define IOMMU_MSEC_STEP 10
+#define IOMMU_MSEC_TIMEOUT 5000
+
+static DEFINE_MUTEX(msm_iommu_lock);
+struct dump_regs_tbl_entry dump_regs_tbl[MAX_DUMP_REGS];
+
+static int __enable_regulators(struct msm_iommu_drvdata *drvdata)
+{
+ ++drvdata->powered_on;
+
+ return 0;
+}
+
+static void __disable_regulators(struct msm_iommu_drvdata *drvdata)
+{
+ --drvdata->powered_on;
+}
+
+static int apply_bus_vote(struct msm_iommu_drvdata *drvdata, unsigned int vote)
+{
+ int ret = 0;
+#ifdef CONFIG_MSM_BUS_SCALING
+ if (drvdata->bus_client) {
+ ret = msm_bus_scale_client_update_request(drvdata->bus_client,
+ vote);
+ if (ret)
+ pr_err("%s: Failed to vote for bus: %d\n", __func__,
+ vote);
+ }
+#endif
+ return ret;
+}
+
+static int __enable_clocks(struct msm_iommu_drvdata *drvdata)
+{
+ int ret;
+
+ ret = clk_prepare_enable(drvdata->iface);
+ if (ret)
+ return ret;
+
+ ret = clk_prepare_enable(drvdata->core);
+ if (ret)
+ goto err;
+
+ return 0;
+
+err:
+ clk_disable_unprepare(drvdata->iface);
+ return ret;
+}
+
+static void __disable_clocks(struct msm_iommu_drvdata *drvdata)
+{
+ clk_disable_unprepare(drvdata->core);
+ clk_disable_unprepare(drvdata->iface);
+}
+
+static void _iommu_lock_acquire(unsigned int need_extra_lock)
+{
+ mutex_lock(&msm_iommu_lock);
+}
+
+static void _iommu_lock_release(unsigned int need_extra_lock)
+{
+ mutex_unlock(&msm_iommu_lock);
+}
+
+struct iommu_access_ops iommu_access_ops_v1 = {
+ .iommu_power_on = __enable_regulators,
+ .iommu_power_off = __disable_regulators,
+ .iommu_bus_vote = apply_bus_vote,
+ .iommu_clk_on = __enable_clocks,
+ .iommu_clk_off = __disable_clocks,
+ .iommu_lock_acquire = _iommu_lock_acquire,
+ .iommu_lock_release = _iommu_lock_release,
+};
+
+static BLOCKING_NOTIFIER_HEAD(msm_iommu_notifier_list);
+
+void msm_iommu_register_notify(struct notifier_block *nb)
+{
+ blocking_notifier_chain_register(&msm_iommu_notifier_list, nb);
+}
+EXPORT_SYMBOL(msm_iommu_register_notify);
+
+#ifdef CONFIG_MSM_IOMMU_VBIF_CHECK
+
+#define VBIF_XIN_HALT_CTRL0 0x200
+#define VBIF_XIN_HALT_CTRL1 0x204
+#define VBIF_AXI_HALT_CTRL0 0x208
+#define VBIF_AXI_HALT_CTRL1 0x20C
+
+static void __halt_vbif_xin(void __iomem *vbif_base)
+{
+ pr_err("Halting VBIF_XIN\n");
+ writel_relaxed(0xFFFFFFFF, vbif_base + VBIF_XIN_HALT_CTRL0);
+}
+
+static void __dump_vbif_state(void __iomem *base, void __iomem *vbif_base)
+{
+ unsigned int reg_val;
+
+ reg_val = readl_relaxed(base + MICRO_MMU_CTRL);
+ pr_err("Value of SMMU_IMPLDEF_MICRO_MMU_CTRL = 0x%x\n", reg_val);
+
+ reg_val = readl_relaxed(vbif_base + VBIF_XIN_HALT_CTRL0);
+ pr_err("Value of VBIF_XIN_HALT_CTRL0 = 0x%x\n", reg_val);
+ reg_val = readl_relaxed(vbif_base + VBIF_XIN_HALT_CTRL1);
+ pr_err("Value of VBIF_XIN_HALT_CTRL1 = 0x%x\n", reg_val);
+ reg_val = readl_relaxed(vbif_base + VBIF_AXI_HALT_CTRL0);
+ pr_err("Value of VBIF_AXI_HALT_CTRL0 = 0x%x\n", reg_val);
+ reg_val = readl_relaxed(vbif_base + VBIF_AXI_HALT_CTRL1);
+ pr_err("Value of VBIF_AXI_HALT_CTRL1 = 0x%x\n", reg_val);
+}
+
+static int __check_vbif_state(struct msm_iommu_drvdata const *drvdata)
+{
+ phys_addr_t addr = (phys_addr_t) (drvdata->phys_base
+ - (phys_addr_t) 0x4000);
+ void __iomem *base = ioremap(addr, 0x1000);
+ int ret = 0;
+
+ if (base) {
+ __dump_vbif_state(drvdata->base, base);
+ __halt_vbif_xin(base);
+ __dump_vbif_state(drvdata->base, base);
+ iounmap(base);
+ } else {
+ pr_err("%s: Unable to ioremap\n", __func__);
+ ret = -ENOMEM;
+ }
+ return ret;
+}
+
+static void check_halt_state(struct msm_iommu_drvdata const *drvdata)
+{
+ int res;
+ unsigned int val;
+ void __iomem *base = drvdata->base;
+ char const *name = drvdata->name;
+
+ pr_err("Timed out waiting for IOMMU halt to complete for %s\n", name);
+ res = __check_vbif_state(drvdata);
+ if (res)
+ BUG();
+
+ pr_err("Checking if IOMMU halt completed for %s\n", name);
+
+ res = readl_poll_timeout(GLB_REG(MICRO_MMU_CTRL, base), val,
+ (val & MMU_CTRL_IDLE) == MMU_CTRL_IDLE, 0, 5000000);
+
+ if (res) {
+ pr_err("Timed out (again) waiting for IOMMU halt to complete for %s\n",
+ name);
+ } else {
+ pr_err("IOMMU halt completed. VBIF FIFO most likely not getting drained by master\n");
+ }
+ BUG();
+}
+
+static void check_tlb_sync_state(struct msm_iommu_drvdata const *drvdata,
+ int ctx, struct msm_iommu_priv *priv)
+{
+ int res;
+ unsigned int val;
+ void __iomem *base = drvdata->cb_base;
+ char const *name = drvdata->name;
+
+ pr_err("Timed out waiting for TLB SYNC to complete for %s (client: %s)\n",
+ name, priv->client_name);
+ blocking_notifier_call_chain(&msm_iommu_notifier_list, TLB_SYNC_TIMEOUT,
+ (void *) priv->client_name);
+ res = __check_vbif_state(drvdata);
+ if (res)
+ BUG();
+
+ pr_err("Checking if TLB sync completed for %s\n", name);
+
+ res = readl_poll_timeout(CTX_REG(CB_TLBSTATUS, base, ctx), val,
+ (val & CB_TLBSTATUS_SACTIVE) == 0, 0, 5000000);
+ if (res) {
+ pr_err("Timed out (again) waiting for TLB SYNC to complete for %s\n",
+ name);
+ } else {
+ pr_err("TLB Sync completed. VBIF FIFO most likely not getting drained by master\n");
+ }
+ BUG();
+}
+
+#else
+
+/*
+ * For targets without VBIF or for targets with the VBIF check disabled
+ * we directly just crash to capture the issue
+ */
+static void check_halt_state(struct msm_iommu_drvdata const *drvdata)
+{
+ BUG();
+}
+
+static void check_tlb_sync_state(struct msm_iommu_drvdata const *drvdata,
+ int ctx, struct msm_iommu_priv *priv)
+{
+ BUG();
+}
+
+#endif
+
+void iommu_halt(struct msm_iommu_drvdata const *iommu_drvdata)
+{
+ void __iomem *base = iommu_drvdata->base;
+ unsigned int val;
+ int res;
+
+ if (!iommu_drvdata->halt_enabled)
+ return;
+
+ SET_MICRO_MMU_CTRL_HALT_REQ(base, 1);
+ res = readl_poll_timeout(GLB_REG(MICRO_MMU_CTRL, base), val,
+ (val & MMU_CTRL_IDLE) == MMU_CTRL_IDLE, 0, 5000000);
+ if (res)
+ check_halt_state(iommu_drvdata);
+
+ /* Ensure device is idle before continuing */
+ mb();
+}
+
+void iommu_resume(const struct msm_iommu_drvdata *iommu_drvdata)
+{
+ if (!iommu_drvdata->halt_enabled)
+ return;
+
+ /* Ensure transactions have completed before releasing the halt */
+ mb();
+
+ SET_MICRO_MMU_CTRL_HALT_REQ(iommu_drvdata->base, 0);
+
+ /*
+ * Ensure write is complete before continuing to ensure
+ * we don't turn off clocks while transaction is still
+ * pending.
+ */
+ mb();
+}
+
+static void __sync_tlb(struct msm_iommu_drvdata *iommu_drvdata, int ctx,
+ struct msm_iommu_priv *priv)
+{
+ void __iomem *base = iommu_drvdata->cb_base;
+ unsigned int val;
+ unsigned int res;
+
+ SET_TLBSYNC(base, ctx, 0);
+ /* No barrier needed due to read dependency */
+
+ res = readl_poll_timeout(CTX_REG(CB_TLBSTATUS, base, ctx), val,
+ (val & CB_TLBSTATUS_SACTIVE) == 0, 0, 5000000);
+ if (res)
+ check_tlb_sync_state(iommu_drvdata, ctx, priv);
+}
+
+#ifdef CONFIG_MSM_IOMMU_TLBINVAL_ON_MAP
+/*
+ * Invalidate the TLB entry for a single VA in every context attached to
+ * @domain.  Returns 0 on success or a clock-enable error.
+ */
+static int __flush_iotlb_va(struct iommu_domain *domain, unsigned int va)
+{
+	struct msm_iommu_priv *priv = to_msm_priv(domain);
+	struct msm_iommu_drvdata *iommu_drvdata;
+	struct msm_iommu_ctx_drvdata *ctx_drvdata;
+	int ret = 0;
+
+	list_for_each_entry(ctx_drvdata, &priv->list_attached, attached_elm) {
+		BUG_ON(!ctx_drvdata->pdev || !ctx_drvdata->pdev->dev.parent);
+
+		iommu_drvdata = dev_get_drvdata(ctx_drvdata->pdev->dev.parent);
+		BUG_ON(!iommu_drvdata);
+
+		ret = __enable_clocks(iommu_drvdata);
+		if (ret)
+			goto fail;
+
+		/* TLBIVA carries the ASID alongside the (masked) VA */
+		SET_TLBIVA(iommu_drvdata->cb_base, ctx_drvdata->num,
+			ctx_drvdata->asid | (va & CB_TLBIVA_VA));
+		mb();
+		__sync_tlb(iommu_drvdata, ctx_drvdata->num, priv);
+		__disable_clocks(iommu_drvdata);
+	}
+
+fail:
+	return ret;
+}
+#endif
+
+/*
+ * Invalidate the whole TLB footprint (by ASID) of every context attached
+ * to @domain.  Returns 0 on success or a clock-enable error.
+ */
+static int __flush_iotlb(struct iommu_domain *domain)
+{
+	struct msm_iommu_priv *priv = to_msm_priv(domain);
+	struct msm_iommu_drvdata *iommu_drvdata;
+	struct msm_iommu_ctx_drvdata *ctx_drvdata;
+	int ret = 0;
+
+	list_for_each_entry(ctx_drvdata, &priv->list_attached, attached_elm) {
+		BUG_ON(!ctx_drvdata->pdev || !ctx_drvdata->pdev->dev.parent);
+
+		iommu_drvdata = dev_get_drvdata(ctx_drvdata->pdev->dev.parent);
+		BUG_ON(!iommu_drvdata);
+
+		ret = __enable_clocks(iommu_drvdata);
+		if (ret)
+			goto fail;
+
+		SET_TLBIASID(iommu_drvdata->cb_base, ctx_drvdata->num,
+			ctx_drvdata->asid);
+		__sync_tlb(iommu_drvdata, ctx_drvdata->num, priv);
+		__disable_clocks(iommu_drvdata);
+	}
+
+fail:
+	return ret;
+}
+
+/*
+ * Reset the global (non-secure) SMMU state to a known-clean configuration
+ * and invalidate the entire non-secure TLB and all stream-match registers.
+ *
+ * May only be called for non-secure iommus
+ */
+static void __reset_iommu(struct msm_iommu_drvdata *iommu_drvdata)
+{
+	void __iomem *base = iommu_drvdata->base;
+	int i, smt_size, res;
+	unsigned long val;
+
+	/* SMMU_ACR is an implementation defined register.
+	 * Resetting is not required for some implementation.
+	 */
+	if (iommu_drvdata->model != MMU_500)
+		SET_ACR(base, 0);
+	SET_CR2(base, 0);
+	SET_GFAR(base, 0);
+	SET_GFSRRESTORE(base, 0);
+
+	/* Invalidate the entire non-secure TLB */
+	SET_TLBIALLNSNH(base, 0);
+	SET_TLBGSYNC(base, 0);
+	res = readl_poll_timeout(GLB_REG(TLBGSTATUS, base), val,
+				(val & TLBGSTATUS_GSACTIVE) == 0, 0, 5000000);
+	if (res)
+		BUG();
+
+	/* Mark every stream-match register invalid */
+	smt_size = GET_IDR0_NUMSMRG(base);
+
+	for (i = 0; i < smt_size; i++)
+		SET_SMR_VALID(base, i, 0);
+
+	mb();
+}
+
+/* Reset the secure-world global registers (used when no SCM interface owns them) */
+static void __reset_iommu_secure(struct msm_iommu_drvdata *iommu_drvdata)
+{
+	void __iomem *base = iommu_drvdata->base;
+
+	/* NSACR is implementation defined; MMU-500 does not need the reset */
+	if (iommu_drvdata->model != MMU_500)
+		SET_NSACR(base, 0);
+	SET_NSCR2(base, 0);
+	SET_NSGFAR(base, 0);
+	SET_NSGFSRRESTORE(base, 0);
+	mb();
+}
+
+/*
+ * Program the secure-world copies of the global configuration: enable
+ * global fault reporting and turn translation on (CLIENTPD = 0).
+ */
+static void __program_iommu_secure(struct msm_iommu_drvdata *iommu_drvdata)
+{
+	void __iomem *base = iommu_drvdata->base;
+
+	if (iommu_drvdata->model == MMU_500) {
+		SET_NSACR_SMTNMC_BPTLBEN(base, 1);
+		SET_NSACR_MMUDIS_BPTLBEN(base, 1);
+		SET_NSACR_S2CR_BPTLBEN(base, 1);
+	}
+	SET_NSCR0_SMCFCFG(base, 1);
+	SET_NSCR0_USFCFG(base, 1);
+	SET_NSCR0_STALLD(base, 1);
+	SET_NSCR0_GCFGFIE(base, 1);
+	SET_NSCR0_GCFGFRE(base, 1);
+	SET_NSCR0_GFIE(base, 1);
+	SET_NSCR0_GFRE(base, 1);
+	SET_NSCR0_CLIENTPD(base, 0);
+}
+
+/*
+ * One-time global programming of a non-secure SMMU: reset it, enable
+ * global fault reporting, enable translation and route interrupts.
+ *
+ * May only be called for non-secure iommus
+ */
+static void __program_iommu(struct msm_iommu_drvdata *drvdata)
+{
+	__reset_iommu(drvdata);
+
+	/*
+	 * NOTE(review): when no SCM call interface is available the
+	 * non-secure side appears to own the secure banks too and resets /
+	 * programs them directly here — confirm against target firmware docs.
+	 */
+	if (!msm_iommu_get_scm_call_avail())
+		__reset_iommu_secure(drvdata);
+
+	if (drvdata->model == MMU_500) {
+		SET_ACR_SMTNMC_BPTLBEN(drvdata->base, 1);
+		SET_ACR_MMUDIS_BPTLBEN(drvdata->base, 1);
+		SET_ACR_S2CR_BPTLBEN(drvdata->base, 1);
+	}
+	SET_CR0_SMCFCFG(drvdata->base, 1);
+	SET_CR0_USFCFG(drvdata->base, 1);
+	SET_CR0_STALLD(drvdata->base, 1);
+	SET_CR0_GCFGFIE(drvdata->base, 1);
+	SET_CR0_GCFGFRE(drvdata->base, 1);
+	SET_CR0_GFIE(drvdata->base, 1);
+	SET_CR0_GFRE(drvdata->base, 1);
+	SET_CR0_CLIENTPD(drvdata->base, 0);
+
+	if (!msm_iommu_get_scm_call_avail())
+		__program_iommu_secure(drvdata);
+
+	/* Route all SMMU interrupts to the non-secure interrupt line */
+	if (drvdata->smmu_local_base)
+		writel_relaxed(0xFFFFFFFF,
+			       drvdata->smmu_local_base + SMMU_INTR_SEL_NS);
+
+	mb(); /* Make sure writes complete before returning */
+}
+
+/*
+ * Write the target-specific BFB tuning register/value pairs (if any were
+ * provided in the device data) and fence the writes before returning.
+ */
+void program_iommu_bfb_settings(void __iomem *base,
+			const struct msm_iommu_bfb_settings *bfb_settings)
+{
+	unsigned int idx;
+
+	if (!bfb_settings) {
+		/* Nothing to program; still order against later accesses */
+		mb();
+		return;
+	}
+
+	for (idx = 0; idx < bfb_settings->length; idx++)
+		SET_GLOBAL_REG(base, bfb_settings->regs[idx],
+			       bfb_settings->data[idx]);
+
+	/* Make sure writes complete before returning */
+	mb();
+}
+
+/* Clear one context bank back to its reset state (ACTLR preserved on MMU-500) */
+static void __reset_context(struct msm_iommu_drvdata *iommu_drvdata, int ctx)
+{
+	void __iomem *base = iommu_drvdata->cb_base;
+
+	/* Don't set ACTLR to zero because if context bank is in
+	 * bypass mode (say after iommu_detach), still this ACTLR
+	 * value matters for micro-TLB caching.
+	 */
+	if (iommu_drvdata->model != MMU_500)
+		SET_ACTLR(base, ctx, 0);
+	SET_FAR(base, ctx, 0);
+	SET_FSRRESTORE(base, ctx, 0);
+	SET_NMRR(base, ctx, 0);
+	SET_PAR(base, ctx, 0);
+	SET_PRRR(base, ctx, 0);
+	SET_SCTLR(base, ctx, 0);
+	SET_TTBCR(base, ctx, 0);
+	SET_TTBR0(base, ctx, 0);
+	SET_TTBR1(base, ctx, 0);
+	mb();
+}
+
+/* Invalidate every currently-valid stream-match group (SMG) entry */
+static void __release_smg(void __iomem *base)
+{
+	int smt_size = GET_IDR0_NUMSMRG(base);
+	int idx;
+
+	for (idx = 0; idx < smt_size; idx++) {
+		if (GET_SMR_VALID(base, idx))
+			SET_SMR_VALID(base, idx, 0);
+	}
+}
+
+#ifdef CONFIG_IOMMU_LPAE
+/* LPAE: the ASID lives in TTBR0 */
+static void msm_iommu_set_ASID(void __iomem *base, unsigned int ctx_num,
+			       unsigned int asid)
+{
+	SET_CB_TTBR0_ASID(base, ctx_num, asid);
+}
+#else
+/* Non-LPAE: the ASID lives in CONTEXTIDR */
+static void msm_iommu_set_ASID(void __iomem *base, unsigned int ctx_num,
+			       unsigned int asid)
+{
+	SET_CB_CONTEXTIDR_ASID(base, ctx_num, asid);
+}
+#endif
+
+/*
+ * Give the context its ASID.  The context-bank number doubles as the ASID,
+ * which is unique per SMMU instance.  @priv is currently unused here.
+ */
+static void msm_iommu_assign_ASID(const struct msm_iommu_drvdata *iommu_drvdata,
+				  struct msm_iommu_ctx_drvdata *curr_ctx,
+				  struct msm_iommu_priv *priv)
+{
+	void __iomem *cb_base = iommu_drvdata->cb_base;
+
+	curr_ctx->asid = curr_ctx->num;
+	msm_iommu_set_ASID(cb_base, curr_ctx->num, curr_ctx->asid);
+}
+
+#ifdef CONFIG_IOMMU_LPAE
+/* LPAE context setup: switch the context bank to the long-descriptor format */
+static void msm_iommu_setup_ctx(void __iomem *base, unsigned int ctx)
+{
+	SET_CB_TTBCR_EAE(base, ctx, 1); /* Extended Address Enable (EAE) */
+}
+
+/* Program the LPAE memory-attribute indirection registers */
+static void msm_iommu_setup_memory_remap(void __iomem *base, unsigned int ctx)
+{
+	SET_CB_MAIR0(base, ctx, msm_iommu_get_mair0());
+	SET_CB_MAIR1(base, ctx, msm_iommu_get_mair1());
+}
+
+/* Make hardware page-table walks cacheable/shareable (LPAE TTBCR fields) */
+static void msm_iommu_setup_pg_l2_redirect(void __iomem *base, unsigned int ctx)
+{
+	/*
+	 * Configure page tables as inner-cacheable and shareable to reduce
+	 * the TLB miss penalty.
+	 */
+	SET_CB_TTBCR_SH0(base, ctx, 3); /* Inner shareable */
+	SET_CB_TTBCR_ORGN0(base, ctx, 1); /* outer cachable*/
+	SET_CB_TTBCR_IRGN0(base, ctx, 1); /* inner cachable*/
+	SET_CB_TTBCR_T0SZ(base, ctx, 0); /* 0GB-4GB */
+
+
+	SET_CB_TTBCR_SH1(base, ctx, 3); /* Inner shareable */
+	SET_CB_TTBCR_ORGN1(base, ctx, 1); /* outer cachable*/
+	SET_CB_TTBCR_IRGN1(base, ctx, 1); /* inner cachable*/
+	SET_CB_TTBCR_T1SZ(base, ctx, 0); /* TTBR1 not used */
+}
+
+#else
+
+/* Non-LPAE context setup: enable TEX remap for the short-descriptor format */
+static void msm_iommu_setup_ctx(void __iomem *base, unsigned int ctx)
+{
+	/* Turn on TEX Remap */
+	SET_CB_SCTLR_TRE(base, ctx, 1);
+}
+
+/* Program the short-descriptor memory-remap registers (PRRR/NMRR) */
+static void msm_iommu_setup_memory_remap(void __iomem *base, unsigned int ctx)
+{
+	SET_PRRR(base, ctx, msm_iommu_get_prrr());
+	SET_NMRR(base, ctx, msm_iommu_get_nmrr());
+}
+
+/* Make hardware page-table walks cacheable/shareable (TTBR0 attribute bits) */
+static void msm_iommu_setup_pg_l2_redirect(void __iomem *base, unsigned int ctx)
+{
+	/* Configure page tables as inner-cacheable and shareable to reduce
+	 * the TLB miss penalty.
+	 */
+	SET_CB_TTBR0_S(base, ctx, 1);
+	SET_CB_TTBR0_NOS(base, ctx, 1);
+	SET_CB_TTBR0_IRGN1(base, ctx, 0); /* WB, WA */
+	SET_CB_TTBR0_IRGN0(base, ctx, 1);
+	SET_CB_TTBR0_RGN(base, ctx, 1);	/* WB, WA */
+}
+
+#endif
+
+/*
+ * Program the master-to-VMID (SMR + S2CR) entries for one context device:
+ * each of the context's SIDs claims the first free stream-match register
+ * and is routed to the context bank.  Always returns 0 (signature fits
+ * device_for_each_child()).
+ */
+static int program_m2v_table(struct device *dev, void __iomem *base)
+{
+	struct msm_iommu_ctx_drvdata *ctx_drvdata = dev_get_drvdata(dev);
+	u32 *sids = ctx_drvdata->sids;
+	u32 *sid_mask = ctx_drvdata->sid_mask;
+	unsigned int ctx = ctx_drvdata->num;
+	int num = 0, i, smt_size;
+	int len = ctx_drvdata->nsid;	/* byte length of the sids[] array */
+
+	smt_size = GET_IDR0_NUMSMRG(base);
+
+	/* Program the M2V tables for this context */
+	for (i = 0; i < len / sizeof(*sids); i++) {
+		/* Find the next invalid (free) stream-match register */
+		for (; num < smt_size; num++)
+			if (GET_SMR_VALID(base, num) == 0)
+				break;
+		BUG_ON(num >= smt_size);
+
+		SET_SMR_VALID(base, num, 1);
+		SET_SMR_MASK(base, num, sid_mask[i]);
+		SET_SMR_ID(base, num, sids[i]);
+
+		SET_S2CR_N(base, num, 0);
+		SET_S2CR_CBNDX(base, num, ctx);
+		SET_S2CR_MEMATTR(base, num, 0x0A);
+		/* Set security bit override to be Non-secure */
+		SET_S2CR_NSCFG(base, num, 3);
+	}
+
+	return 0;
+}
+
+/* Program the M2V tables of every context child of this IOMMU device */
+static void program_all_m2v_tables(struct msm_iommu_drvdata *iommu_drvdata)
+{
+	device_for_each_child(iommu_drvdata->dev, iommu_drvdata->base,
+			      program_m2v_table);
+}
+
+/*
+ * Fully program one context bank for @priv's page table: reset it, set up
+ * the translation format and memory attributes, install TTBR0, assign the
+ * ASID, invalidate stale TLB entries and finally enable the MMU.
+ * Caller must hold the IOMMU halted and its clocks enabled.
+ */
+static void __program_context(struct msm_iommu_drvdata *iommu_drvdata,
+			      struct msm_iommu_ctx_drvdata *ctx_drvdata,
+			      struct msm_iommu_priv *priv, bool is_secure,
+			      bool program_m2v)
+{
+	phys_addr_t pn;
+	void __iomem *base = iommu_drvdata->base;
+	void __iomem *cb_base = iommu_drvdata->cb_base;
+	unsigned int ctx = ctx_drvdata->num;
+	phys_addr_t pgtable = __pa(priv->pt.fl_table);
+
+	__reset_context(iommu_drvdata, ctx);
+	msm_iommu_setup_ctx(cb_base, ctx);
+
+	if (priv->pt.redirect)
+		msm_iommu_setup_pg_l2_redirect(cb_base, ctx);
+
+	msm_iommu_setup_memory_remap(cb_base, ctx);
+
+	/* Install the first-level table's page-frame number into TTBR0 */
+	pn = pgtable >> CB_TTBR0_ADDR_SHIFT;
+	SET_CB_TTBR0_ADDR(cb_base, ctx, pn);
+
+	/* Enable context fault interrupt */
+	SET_CB_SCTLR_CFIE(cb_base, ctx, 1);
+
+	if (iommu_drvdata->model != MMU_500) {
+		/* Redirect all cacheable requests to L2 slave port. */
+		SET_CB_ACTLR_BPRCISH(cb_base, ctx, 1);
+		SET_CB_ACTLR_BPRCOSH(cb_base, ctx, 1);
+		SET_CB_ACTLR_BPRCNSH(cb_base, ctx, 1);
+	}
+
+	/* Enable private ASID namespace */
+	SET_CB_SCTLR_ASIDPNE(cb_base, ctx, 1);
+
+	if (!is_secure) {
+		if (program_m2v)
+			program_all_m2v_tables(iommu_drvdata);
+
+		SET_CBAR_N(base, ctx, 0);
+
+		/* Stage 1 Context with Stage 2 bypass */
+		SET_CBAR_TYPE(base, ctx, 1);
+
+		/* Route page faults to the non-secure interrupt */
+		SET_CBAR_IRPTNDX(base, ctx, 1);
+
+		/* Set VMID to non-secure HLOS */
+		SET_CBAR_VMID(base, ctx, 3);
+
+		/* Bypass is treated as inner-shareable */
+		SET_CBAR_BPSHCFG(base, ctx, 2);
+
+		/* Do not downgrade memory attributes */
+		SET_CBAR_MEMATTR(base, ctx, 0x0A);
+
+	}
+
+	msm_iommu_assign_ASID(iommu_drvdata, ctx_drvdata, priv);
+
+	/* Ensure that ASID assignment has completed before we use
+	 * ASID for TLB invalidation. Here, mb() is required because
+	 * both these registers are separated by more than 1KB. */
+	mb();
+
+	/* Drop any stale TLB entries carrying this ASID */
+	SET_TLBIASID(iommu_drvdata->cb_base, ctx_drvdata->num,
+		     ctx_drvdata->asid);
+	__sync_tlb(iommu_drvdata, ctx_drvdata->num, priv);
+
+	/* Enable the MMU */
+	SET_CB_SCTLR_M(cb_base, ctx, 1);
+	mb();
+}
+
+#ifdef CONFIG_IOMMU_PGTABLES_L2
+#define INITIAL_REDIRECT_VAL 1
+#else
+#define INITIAL_REDIRECT_VAL 0
+#endif
+
+/*
+ * Allocate an unmanaged MSM IOMMU domain together with its first-level
+ * page table.  Returns NULL for unsupported domain types or on allocation
+ * failure (the original returned the literal 0 for a pointer).
+ */
+static struct iommu_domain *msm_iommu_domain_alloc(unsigned type)
+{
+	struct msm_iommu_priv *priv;
+	int ret;
+
+	/* This driver only supports unmanaged domains */
+	if (type != IOMMU_DOMAIN_UNMANAGED)
+		return NULL;
+
+	priv = kzalloc(sizeof(*priv), GFP_KERNEL);
+	if (!priv)
+		return NULL;
+
+	priv->pt.redirect = INITIAL_REDIRECT_VAL;
+
+	INIT_LIST_HEAD(&priv->list_attached);
+
+	ret = msm_iommu_pagetable_alloc(&priv->pt);
+	if (ret) {
+		kfree(priv);
+		return NULL;
+	}
+
+	return &priv->domain;
+}
+
+/*
+ * Free a domain allocated by msm_iommu_domain_alloc(): release the page
+ * table and the private structure.  (to_msm_priv() is presumably a
+ * container_of, so the NULL check is defensive only — verify.)
+ */
+static void msm_iommu_domain_free(struct iommu_domain *domain)
+{
+	struct msm_iommu_priv *priv;
+
+	mutex_lock(&msm_iommu_lock);
+	priv = to_msm_priv(domain);
+
+	if (priv)
+		msm_iommu_pagetable_free(&priv->pt);
+
+	kfree(priv);
+	mutex_unlock(&msm_iommu_lock);
+}
+
+/*
+ * Attach @dev to @domain: power the IOMMU (regulators + bus vote + clocks),
+ * perform one-time global programming on the very first attach, then program
+ * this device's context bank.  On success the regulators and bus vote stay
+ * enabled until the matching detach; only the clocks are dropped again.
+ *
+ * Error paths now unwind in reverse order of acquisition: the original
+ * leaked the regulator enable when apply_bus_vote() failed, and leaked the
+ * bus vote when __enable_clocks() or the secure programming failed.
+ */
+static int msm_iommu_attach_dev(struct iommu_domain *domain, struct device *dev)
+{
+	struct msm_iommu_priv *priv;
+	struct msm_iommu_drvdata *iommu_drvdata;
+	struct msm_iommu_ctx_drvdata *ctx_drvdata;
+	struct msm_iommu_ctx_drvdata *tmp_drvdata;
+	int ret = 0;
+	int is_secure;
+	bool set_m2v = false;
+
+	mutex_lock(&msm_iommu_lock);
+
+	priv = to_msm_priv(domain);
+	if (!priv || !dev) {
+		ret = -EINVAL;
+		goto unlock;
+	}
+
+	iommu_drvdata = dev_get_drvdata(dev->parent);
+	ctx_drvdata = dev_get_drvdata(dev);
+	if (!iommu_drvdata || !ctx_drvdata) {
+		ret = -EINVAL;
+		goto unlock;
+	}
+
+	/*
+	 * Nested attaches only bump the refcount.
+	 * NOTE(review): the count is not rolled back on the error paths
+	 * below — pre-existing behaviour, confirm whether that is intended.
+	 */
+	++ctx_drvdata->attach_count;
+
+	if (ctx_drvdata->attach_count > 1)
+		goto already_attached;
+
+	if (!list_empty(&ctx_drvdata->attached_elm)) {
+		ret = -EBUSY;
+		goto unlock;
+	}
+
+	list_for_each_entry(tmp_drvdata, &priv->list_attached, attached_elm)
+		if (tmp_drvdata == ctx_drvdata) {
+			ret = -EBUSY;
+			goto unlock;
+		}
+
+	is_secure = iommu_drvdata->sec_id != -1;
+
+	ret = __enable_regulators(iommu_drvdata);
+	if (ret)
+		goto unlock;
+
+	ret = apply_bus_vote(iommu_drvdata, 1);
+	if (ret)
+		goto out_regulators;
+
+	ret = __enable_clocks(iommu_drvdata);
+	if (ret)
+		goto out_bus;
+
+	/* We can only do this once */
+	if (!iommu_drvdata->ctx_attach_count) {
+		if (!is_secure) {
+			iommu_halt(iommu_drvdata);
+			__program_iommu(iommu_drvdata);
+			iommu_resume(iommu_drvdata);
+		} else {
+			ret = msm_iommu_sec_program_iommu(iommu_drvdata,
+							  ctx_drvdata);
+			if (ret)
+				goto out_clocks;
+		}
+		program_iommu_bfb_settings(iommu_drvdata->base,
+					   iommu_drvdata->bfb_settings);
+		set_m2v = true;
+	}
+
+	iommu_halt(iommu_drvdata);
+
+	__program_context(iommu_drvdata, ctx_drvdata, priv, is_secure, set_m2v);
+
+	iommu_resume(iommu_drvdata);
+
+	/* Ensure TLB is clear */
+	if (iommu_drvdata->model != MMU_500) {
+		SET_TLBIASID(iommu_drvdata->cb_base, ctx_drvdata->num,
+			     ctx_drvdata->asid);
+		__sync_tlb(iommu_drvdata, ctx_drvdata->num, priv);
+	}
+
+	/* Clocks are only needed while registers are being touched */
+	__disable_clocks(iommu_drvdata);
+
+	list_add(&(ctx_drvdata->attached_elm), &priv->list_attached);
+	ctx_drvdata->attached_domain = domain;
+	++iommu_drvdata->ctx_attach_count;
+
+already_attached:
+	mutex_unlock(&msm_iommu_lock);
+
+	msm_iommu_attached(dev->parent);
+	return ret;
+
+out_clocks:
+	__disable_clocks(iommu_drvdata);
+out_bus:
+	apply_bus_vote(iommu_drvdata, 0);
+out_regulators:
+	__disable_regulators(iommu_drvdata);
+unlock:
+	mutex_unlock(&msm_iommu_lock);
+	return ret;
+}
+
+/*
+ * Detach @dev from @domain.  Refcounted: only the last detach resets the
+ * context bank, releases the stream-match entries (last context on the
+ * IOMMU) and drops the bus vote / regulators taken at attach time.
+ */
+static void msm_iommu_detach_dev(struct iommu_domain *domain,
+				 struct device *dev)
+{
+	struct msm_iommu_priv *priv;
+	struct msm_iommu_drvdata *iommu_drvdata;
+	struct msm_iommu_ctx_drvdata *ctx_drvdata;
+	int ret;
+	int is_secure;
+
+	if (!dev)
+		return;
+
+	msm_iommu_detached(dev->parent);
+
+	mutex_lock(&msm_iommu_lock);
+	priv = to_msm_priv(domain);
+	if (!priv)
+		goto unlock;
+
+	iommu_drvdata = dev_get_drvdata(dev->parent);
+	ctx_drvdata = dev_get_drvdata(dev);
+	if (!iommu_drvdata || !ctx_drvdata || !ctx_drvdata->attached_domain)
+		goto unlock;
+
+	--ctx_drvdata->attach_count;
+	BUG_ON(ctx_drvdata->attach_count < 0);
+
+	/* Still referenced by other attaches; nothing to tear down yet */
+	if (ctx_drvdata->attach_count > 0)
+		goto unlock;
+
+	ret = __enable_clocks(iommu_drvdata);
+	if (ret)
+		goto unlock;
+
+	is_secure = iommu_drvdata->sec_id != -1;
+
+	/* Invalidate this context's TLB footprint before the bank is reset */
+	if (iommu_drvdata->model == MMU_500) {
+		SET_TLBIASID(iommu_drvdata->cb_base, ctx_drvdata->num,
+			     ctx_drvdata->asid);
+		__sync_tlb(iommu_drvdata, ctx_drvdata->num, priv);
+	}
+
+	ctx_drvdata->asid = -1;
+
+	__reset_context(iommu_drvdata, ctx_drvdata->num);
+
+	/* Only reset the M2V tables on the very last detach */
+	if (!is_secure && iommu_drvdata->ctx_attach_count == 1) {
+		iommu_halt(iommu_drvdata);
+		__release_smg(iommu_drvdata->base);
+		iommu_resume(iommu_drvdata);
+	}
+
+	__disable_clocks(iommu_drvdata);
+
+	apply_bus_vote(iommu_drvdata, 0);
+
+	__disable_regulators(iommu_drvdata);
+
+	list_del_init(&ctx_drvdata->attached_elm);
+	ctx_drvdata->attached_domain = NULL;
+	BUG_ON(iommu_drvdata->ctx_attach_count == 0);
+	--iommu_drvdata->ctx_attach_count;
+unlock:
+	mutex_unlock(&msm_iommu_lock);
+}
+
+/*
+ * Map @len bytes at physical @pa to virtual @va in @domain's page table.
+ * TLB invalidation after map is optional (CONFIG_MSM_IOMMU_TLBINVAL_ON_MAP).
+ * Returns 0 on success or a negative errno.
+ */
+static int msm_iommu_map(struct iommu_domain *domain, unsigned long va,
+			 phys_addr_t pa, size_t len, int prot)
+{
+	struct msm_iommu_priv *priv;
+	int ret = 0;
+
+	mutex_lock(&msm_iommu_lock);
+
+	priv = to_msm_priv(domain);
+	if (!priv) {
+		ret = -EINVAL;
+		goto fail;
+	}
+
+	ret = msm_iommu_pagetable_map(&priv->pt, va, pa, len, prot);
+	if (ret)
+		goto fail;
+
+#ifdef CONFIG_MSM_IOMMU_TLBINVAL_ON_MAP
+	ret = __flush_iotlb_va(domain, va);
+#endif
+
+fail:
+	mutex_unlock(&msm_iommu_lock);
+	return ret;
+}
+
+/*
+ * Unmap @len bytes at @va, invalidate the TLB and release now-empty
+ * second-level tables.  Returns the number of bytes unmapped (0 on error),
+ * as required by the IOMMU API.
+ */
+static size_t msm_iommu_unmap(struct iommu_domain *domain, unsigned long va,
+			      size_t len)
+{
+	struct msm_iommu_priv *priv;
+	int ret = -ENODEV;
+
+	mutex_lock(&msm_iommu_lock);
+
+	priv = to_msm_priv(domain);
+	if (!priv)
+		goto fail;
+
+	ret = msm_iommu_pagetable_unmap(&priv->pt, va, len);
+	if (ret < 0)
+		goto fail;
+
+	/* 'ret' now reflects the TLB flush result only */
+	ret = __flush_iotlb(domain);
+
+	msm_iommu_pagetable_free_tables(&priv->pt, va, len);
+fail:
+	mutex_unlock(&msm_iommu_lock);
+
+	/* the IOMMU API requires us to return how many bytes were unmapped */
+	len = ret ? 0 : len;
+	return len;
+}
+
+/*
+ * Map a scatterlist into @domain starting at @iova.
+ *
+ * The IOMMU API expects the number of bytes mapped (0 on failure), not an
+ * errno: the original returned the raw int 'ret', so success looked like 0
+ * bytes and a negative errno wrapped to a huge size_t.
+ */
+static size_t msm_iommu_map_sg(struct iommu_domain *domain, unsigned long iova,
+			       struct scatterlist *sg, unsigned int nents,
+			       int prot)
+{
+	struct msm_iommu_priv *priv;
+	struct scatterlist *tmp;
+	unsigned int len = 0;
+	int ret, i;
+
+	mutex_lock(&msm_iommu_lock);
+
+	priv = to_msm_priv(domain);
+	if (!priv) {
+		ret = -EINVAL;
+		goto fail;
+	}
+
+	/* Total number of bytes described by the scatterlist */
+	for_each_sg(sg, tmp, nents, i)
+		len += tmp->length;
+
+	ret = msm_iommu_pagetable_map_range(&priv->pt, iova, sg, len, prot);
+	if (ret)
+		goto fail;
+
+#ifdef CONFIG_MSM_IOMMU_TLBINVAL_ON_MAP
+	/* Best effort: a failed invalidate does not undo the mapping */
+	__flush_iotlb(domain);
+#endif
+
+fail:
+	mutex_unlock(&msm_iommu_lock);
+	return ret ? 0 : len;
+}
+
+/*
+ * Unmap a whole VA range, flush the TLB and free emptied second-level
+ * tables.  Always returns 0.  (Currently not wired into msm_iommu_ops.)
+ */
+static int msm_iommu_unmap_range(struct iommu_domain *domain, unsigned int va,
+				 unsigned int len)
+{
+	struct msm_iommu_priv *priv;
+
+	mutex_lock(&msm_iommu_lock);
+
+	priv = to_msm_priv(domain);
+	msm_iommu_pagetable_unmap_range(&priv->pt, va, len);
+
+	__flush_iotlb(domain);
+
+	msm_iommu_pagetable_free_tables(&priv->pt, va, len);
+	mutex_unlock(&msm_iommu_lock);
+
+	return 0;
+}
+
+#ifdef CONFIG_IOMMU_LPAE
+/* Combine a translated PAR value with the page offset of @va (LPAE format) */
+static phys_addr_t msm_iommu_get_phy_from_PAR(unsigned long va, u64 par)
+{
+	phys_addr_t phy;
+
+	/* Upper 28 bits from PAR, lower 12 from VA */
+	phy = (par & 0xFFFFFFF000ULL) | (va & 0x00000FFF);
+
+	return phy;
+}
+#else
+/* Combine a translated PAR value with the page offset of @va (short format) */
+static phys_addr_t msm_iommu_get_phy_from_PAR(unsigned long va, u64 par)
+{
+	phys_addr_t phy;
+
+	/* We are dealing with a supersection */
+	if (par & CB_PAR_SS)
+		phy = (par & 0xFF000000) | (va & 0x00FFFFFF);
+	else /* Upper 20 bits from PAR, lower 12 from VA */
+		phy = (par & 0xFFFFF000) | (va & 0x00000FFF);
+
+	return phy;
+}
+#endif
+
+/*
+ * Translate an IOVA to a physical address using the hardware ATS1PR
+ * translation port (software page-table walk on MMU-500).  Returns 0 if
+ * translation fails.
+ *
+ * Fix: the ATSR-poll timeout path previously jumped to 'fail' without
+ * calling __disable_clocks(), leaking the clock enable taken above.
+ */
+static phys_addr_t msm_iommu_iova_to_phys(struct iommu_domain *domain,
+					  phys_addr_t va)
+{
+	struct msm_iommu_priv *priv;
+	struct msm_iommu_drvdata *iommu_drvdata;
+	struct msm_iommu_ctx_drvdata *ctx_drvdata;
+	u64 par;
+	void __iomem *base;
+	phys_addr_t ret = 0;
+	int ctx;
+	int i;
+
+	mutex_lock(&msm_iommu_lock);
+
+	priv = to_msm_priv(domain);
+	if (list_empty(&priv->list_attached))
+		goto fail;
+
+	/* Any attached context can perform the lookup; use the first one */
+	ctx_drvdata = list_entry(priv->list_attached.next,
+				 struct msm_iommu_ctx_drvdata, attached_elm);
+	iommu_drvdata = dev_get_drvdata(ctx_drvdata->pdev->dev.parent);
+
+	if (iommu_drvdata->model == MMU_500) {
+		ret = msm_iommu_iova_to_phys_soft(domain, va);
+		mutex_unlock(&msm_iommu_lock);
+		return ret;
+	}
+
+	base = iommu_drvdata->cb_base;
+	ctx = ctx_drvdata->num;
+
+	ret = __enable_clocks(iommu_drvdata);
+	if (ret) {
+		ret = 0;	/* 0 indicates translation failed */
+		goto fail;
+	}
+
+	/* Kick off the hardware translation and poll for completion */
+	SET_ATS1PR(base, ctx, va & CB_ATS1PR_ADDR);
+	mb();
+	for (i = 0; i < IOMMU_MSEC_TIMEOUT; i += IOMMU_MSEC_STEP)
+		if (GET_CB_ATSR_ACTIVE(base, ctx) == 0)
+			break;
+		else
+			msleep(IOMMU_MSEC_STEP);
+
+	if (i >= IOMMU_MSEC_TIMEOUT) {
+		pr_err("%s: iova to phys timed out on %pa for %s (%s)\n",
+		       __func__, &va, iommu_drvdata->name, ctx_drvdata->name);
+		ret = 0;
+		/* Was missing: drop the clocks on this error path too */
+		__disable_clocks(iommu_drvdata);
+		goto fail;
+	}
+
+	par = GET_PAR(base, ctx);
+	__disable_clocks(iommu_drvdata);
+
+	if (par & CB_PAR_F) {
+		unsigned int level = (par & CB_PAR_PLVL) >> CB_PAR_PLVL_SHIFT;
+		pr_err("IOMMU translation fault!\n");
+		pr_err("name = %s\n", iommu_drvdata->name);
+		pr_err("context = %s (%d)\n", ctx_drvdata->name,
+		       ctx_drvdata->num);
+		pr_err("Interesting registers:\n");
+		pr_err("PAR = %16llx [%s%s%s%s%s%s%s%sPLVL%u %s]\n", par,
+		       (par & CB_PAR_F) ? "F " : "",
+		       (par & CB_PAR_TF) ? "TF " : "",
+		       (par & CB_PAR_AFF) ? "AFF " : "",
+		       (par & CB_PAR_PF) ? "PF " : "",
+		       (par & CB_PAR_EF) ? "EF " : "",
+		       (par & CB_PAR_TLBMCF) ? "TLBMCF " : "",
+		       (par & CB_PAR_TLBLKF) ? "TLBLKF " : "",
+		       (par & CB_PAR_ATOT) ? "ATOT " : "",
+		       level,
+		       (par & CB_PAR_STAGE) ? "S2 " : "S1 ");
+		ret = 0;
+	} else {
+		ret = msm_iommu_get_phy_from_PAR(va, par);
+	}
+
+fail:
+	mutex_unlock(&msm_iommu_lock);
+
+	return ret;
+}
+
+#ifdef CONFIG_IOMMU_LPAE
+/* Dump the memory-attribute registers: MAIR0/MAIR1 in LPAE builds */
+static inline void print_ctx_mem_attr_regs(struct msm_iommu_context_reg regs[])
+{
+	pr_err("MAIR0   = %08x    MAIR1   = %08x\n",
+		 regs[DUMP_REG_MAIR0].val, regs[DUMP_REG_MAIR1].val);
+}
+#else
+/* Dump the memory-attribute registers: PRRR/NMRR in short-descriptor builds */
+static inline void print_ctx_mem_attr_regs(struct msm_iommu_context_reg regs[])
+{
+	pr_err("PRRR   = %08x    NMRR   = %08x\n",
+		 regs[DUMP_REG_PRRR].val, regs[DUMP_REG_NMRR].val);
+}
+#endif
+
+/*
+ * Pretty-print a previously captured set of context-bank registers
+ * (see __print_ctx_regs), decoding the FSR fault bits and flagging any
+ * entries that could not actually be read.
+ */
+void print_ctx_regs(struct msm_iommu_context_reg regs[])
+{
+	uint32_t fsr = regs[DUMP_REG_FSR].val;
+	u64 ttbr;
+	enum dump_reg iter;
+
+	pr_err("FAR    = %016llx\n",
+		COMBINE_DUMP_REG(
+			regs[DUMP_REG_FAR1].val,
+			regs[DUMP_REG_FAR0].val));
+	pr_err("PAR    = %016llx\n",
+		COMBINE_DUMP_REG(
+			regs[DUMP_REG_PAR1].val,
+			regs[DUMP_REG_PAR0].val));
+	pr_err("FSR    = %08x [%s%s%s%s%s%s%s%s%s]\n", fsr,
+		(fsr & 0x02) ? "TF " : "",
+		(fsr & 0x04) ? "AFF " : "",
+		(fsr & 0x08) ? "PF " : "",
+		(fsr & 0x10) ? "EF " : "",
+		(fsr & 0x20) ? "TLBMCF " : "",
+		(fsr & 0x40) ? "TLBLKF " : "",
+		(fsr & 0x80) ? "MHF " : "",
+		(fsr & 0x40000000) ? "SS " : "",
+		(fsr & 0x80000000) ? "MULTI " : "");
+
+	pr_err("FSYNR0 = %08x    FSYNR1 = %08x\n",
+		 regs[DUMP_REG_FSYNR0].val, regs[DUMP_REG_FSYNR1].val);
+
+	/* TTBRs are printed as 64-bit; the high half is only valid on LPAE */
+	ttbr = COMBINE_DUMP_REG(regs[DUMP_REG_TTBR0_1].val,
+				regs[DUMP_REG_TTBR0_0].val);
+	if (regs[DUMP_REG_TTBR0_1].valid)
+		pr_err("TTBR0  = %016llx\n", ttbr);
+	else
+		pr_err("TTBR0  = %016llx (32b)\n", ttbr);
+
+	ttbr = COMBINE_DUMP_REG(regs[DUMP_REG_TTBR1_1].val,
+				regs[DUMP_REG_TTBR1_0].val);
+
+	if (regs[DUMP_REG_TTBR1_1].valid)
+		pr_err("TTBR1  = %016llx\n", ttbr);
+	else
+		pr_err("TTBR1  = %016llx (32b)\n", ttbr);
+
+	pr_err("SCTLR  = %08x    ACTLR  = %08x\n",
+		 regs[DUMP_REG_SCTLR].val, regs[DUMP_REG_ACTLR].val);
+	pr_err("CBAR  = %08x    CBFRSYNRA  = %08x\n",
+		regs[DUMP_REG_CBAR_N].val, regs[DUMP_REG_CBFRSYNRA_N].val);
+	print_ctx_mem_attr_regs(regs);
+
+	for (iter = DUMP_REG_FIRST; iter < MAX_DUMP_REGS; ++iter)
+		if (!regs[iter].valid)
+			pr_err("NOTE: Value actually unknown for %s\n",
+				dump_regs_tbl[iter].name);
+}
+
+/*
+ * Capture and print the dump-table registers for context @ctx.  On secure
+ * IOMMUs only context-bank registers can be read from here, so global
+ * entries are marked invalid.  (@fsr is currently unused in this function;
+ * the FSR is re-read through the dump table.)
+ */
+static void __print_ctx_regs(struct msm_iommu_drvdata *drvdata, int ctx,
+					unsigned int fsr)
+{
+	void __iomem *base = drvdata->base;
+	void __iomem *cb_base = drvdata->cb_base;
+	bool is_secure = drvdata->sec_id != -1;
+
+	struct msm_iommu_context_reg regs[MAX_DUMP_REGS];
+	unsigned int i;
+	memset(regs, 0, sizeof(regs));
+
+	for (i = DUMP_REG_FIRST; i < MAX_DUMP_REGS; ++i) {
+		struct msm_iommu_context_reg *r = &regs[i];
+		unsigned long regaddr = dump_regs_tbl[i].reg_offset;
+		/* Global registers are not accessible on secure IOMMUs */
+		if (is_secure &&
+			dump_regs_tbl[i].dump_reg_type != DRT_CTX_REG) {
+			r->valid = 0;
+			continue;
+		}
+		r->valid = 1;
+		switch (dump_regs_tbl[i].dump_reg_type) {
+		case DRT_CTX_REG:
+			r->val = GET_CTX_REG(regaddr, cb_base, ctx);
+			break;
+		case DRT_GLOBAL_REG:
+			r->val = GET_GLOBAL_REG(regaddr, base);
+			break;
+		case DRT_GLOBAL_REG_N:
+			r->val = GET_GLOBAL_REG_N(regaddr, ctx, base);
+			break;
+		default:
+			pr_info("Unknown dump_reg_type...\n");
+			r->valid = 0;
+			break;
+		}
+	}
+	print_ctx_regs(regs);
+}
+
+/* Decode and print the global fault registers (GFAR/GFSR/GFSYNRn) */
+static void print_global_regs(void __iomem *base, unsigned int gfsr)
+{
+	pr_err("GFAR    = %016llx\n", GET_GFAR(base));
+
+	pr_err("GFSR    = %08x [%s%s%s%s%s%s%s%s%s%s]\n", gfsr,
+			(gfsr & 0x01) ? "ICF " : "",
+			(gfsr & 0x02) ? "USF " : "",
+			(gfsr & 0x04) ? "SMCF " : "",
+			(gfsr & 0x08) ? "UCBF " : "",
+			(gfsr & 0x10) ? "UCIF " : "",
+			(gfsr & 0x20) ? "CAF " : "",
+			(gfsr & 0x40) ? "EF " : "",
+			(gfsr & 0x80) ? "PF " : "",
+			(gfsr & 0x40000000) ? "SS " : "",
+			(gfsr & 0x80000000) ? "MULTI " : "");
+
+	pr_err("GFSYNR0	= %08x\n", GET_GFSYNR0(base));
+	pr_err("GFSYNR1	= %08x\n", GET_GFSYNR1(base));
+	pr_err("GFSYNR2	= %08x\n", GET_GFSYNR2(base));
+}
+
+/*
+ * Global fault interrupt handler: dumps and clears GFSR state.
+ * NOTE(review): it takes a mutex and so must run in threaded-IRQ context —
+ * confirm the IRQ is requested with a threaded handler.
+ */
+irqreturn_t msm_iommu_global_fault_handler(int irq, void *dev_id)
+{
+	struct platform_device *pdev = dev_id;
+	struct msm_iommu_drvdata *drvdata;
+	unsigned int gfsr;
+	int ret;
+
+	mutex_lock(&msm_iommu_lock);
+	BUG_ON(!pdev);
+
+	drvdata = dev_get_drvdata(&pdev->dev);
+	BUG_ON(!drvdata);
+
+	if (!drvdata->powered_on) {
+		pr_err("Unexpected IOMMU global fault !!\n");
+		pr_err("name = %s\n", drvdata->name);
+		pr_err("Power is OFF. Can't read global fault information\n");
+		ret = IRQ_HANDLED;
+		goto fail;
+	}
+
+	/* A non-secure interrupt from a secure IOMMU should not happen */
+	if (drvdata->sec_id != -1) {
+		pr_err("NON-secure interrupt from secure %s\n", drvdata->name);
+		ret = IRQ_HANDLED;
+		goto fail;
+	}
+
+	ret = __enable_clocks(drvdata);
+	if (ret) {
+		ret = IRQ_NONE;
+		goto fail;
+	}
+
+	gfsr = GET_GFSR(drvdata->base);
+	if (gfsr) {
+		pr_err("Unexpected %s global fault !!\n", drvdata->name);
+		print_global_regs(drvdata->base, gfsr);
+		/* Write-to-clear the fault status */
+		SET_GFSR(drvdata->base, gfsr);
+		ret = IRQ_HANDLED;
+	} else
+		ret = IRQ_NONE;
+
+	__disable_clocks(drvdata);
+fail:
+	mutex_unlock(&msm_iommu_lock);
+
+	return ret;
+}
+
+/*
+ * Per-context page fault handler: reports the fault to the domain's
+ * registered fault callback via report_iommu_fault(); if no handler
+ * consumes it (-ENOSYS) the register state is dumped, and unless the
+ * handler asked to stall (-EBUSY) the FSR is cleared.
+ */
+irqreturn_t msm_iommu_fault_handler_v2(int irq, void *dev_id)
+{
+	struct platform_device *pdev = dev_id;
+	struct msm_iommu_drvdata *drvdata;
+	struct msm_iommu_ctx_drvdata *ctx_drvdata;
+	unsigned int fsr;
+	int ret;
+
+	phys_addr_t pagetable_phys;
+	u64 faulty_iova = 0;
+
+	mutex_lock(&msm_iommu_lock);
+
+	BUG_ON(!pdev);
+
+	drvdata = dev_get_drvdata(pdev->dev.parent);
+	BUG_ON(!drvdata);
+
+	ctx_drvdata = dev_get_drvdata(&pdev->dev);
+	BUG_ON(!ctx_drvdata);
+
+	if (!drvdata->powered_on) {
+		pr_err("Unexpected IOMMU page fault!\n");
+		pr_err("name = %s\n", drvdata->name);
+		pr_err("Power is OFF. Unable to read page fault information\n");
+		/*
+		 * We cannot determine which context bank caused the issue so
+		 * we just return handled here to ensure IRQ handler code is
+		 * happy
+		 */
+		ret = IRQ_HANDLED;
+		goto fail;
+	}
+
+	ret = __enable_clocks(drvdata);
+	if (ret) {
+		ret = IRQ_NONE;
+		goto fail;
+	}
+
+	fsr = GET_FSR(drvdata->cb_base, ctx_drvdata->num);
+	if (fsr) {
+		if (!ctx_drvdata->attached_domain) {
+			pr_err("Bad domain in interrupt handler\n");
+			ret = -ENOSYS;
+		} else {
+			/* faulty_iova is only valid on this branch */
+			faulty_iova =
+				GET_FAR(drvdata->cb_base, ctx_drvdata->num);
+			ret = report_iommu_fault(ctx_drvdata->attached_domain,
+				&ctx_drvdata->pdev->dev,
+				faulty_iova, 0);
+
+		}
+		if (ret == -ENOSYS) {
+			pr_err("Unexpected IOMMU page fault!\n");
+			pr_err("name = %s\n", drvdata->name);
+			pr_err("context = %s (%d)\n", ctx_drvdata->name,
+							ctx_drvdata->num);
+			pr_err("Interesting registers:\n");
+			__print_ctx_regs(drvdata,
+					ctx_drvdata->num, fsr);
+
+			/* Cross-check against the in-memory page table */
+			if (ctx_drvdata->attached_domain) {
+				pagetable_phys = msm_iommu_iova_to_phys_soft(
+					ctx_drvdata->attached_domain,
+					faulty_iova);
+				pr_err("Page table in DDR shows PA = %x\n",
+					(unsigned int) pagetable_phys);
+			}
+		}
+
+		/* -EBUSY means the client wants the context stalled */
+		if (ret != -EBUSY)
+			SET_FSR(drvdata->cb_base, ctx_drvdata->num, fsr);
+		ret = IRQ_HANDLED;
+	} else
+		ret = IRQ_NONE;
+
+	__disable_clocks(drvdata);
+fail:
+	mutex_unlock(&msm_iommu_lock);
+
+	return ret;
+}
+
+/* Physical address of the first-level page table backing @domain */
+static phys_addr_t msm_iommu_get_pt_base_addr(struct iommu_domain *domain)
+{
+	return __pa(to_msm_priv(domain)->pt.fl_table);
+}
+
+/* Fill one dump_regs_tbl slot: register offset, printable name and class */
+#define DUMP_REG_INIT(dump_reg, cb_reg, mbp, drt)		\
+	do {							\
+		dump_regs_tbl[dump_reg].reg_offset = cb_reg;	\
+		dump_regs_tbl[dump_reg].name = #cb_reg;		\
+		dump_regs_tbl[dump_reg].must_be_present = mbp;	\
+		dump_regs_tbl[dump_reg].dump_reg_type = drt;	\
+	} while (0)
+
+/*
+ * Build the table that maps DUMP_REG_* identifiers to hardware register
+ * offsets; 64-bit registers (FAR/PAR/TTBRn) get two 32-bit entries, the
+ * '+ 4' halves being optional (mbp = 0) on non-LPAE hardware.
+ */
+static void msm_iommu_build_dump_regs_table(void)
+{
+	DUMP_REG_INIT(DUMP_REG_FAR0,	CB_FAR,       1, DRT_CTX_REG);
+	DUMP_REG_INIT(DUMP_REG_FAR1,	CB_FAR + 4,   1, DRT_CTX_REG);
+	DUMP_REG_INIT(DUMP_REG_PAR0,	CB_PAR,       1, DRT_CTX_REG);
+	DUMP_REG_INIT(DUMP_REG_PAR1,	CB_PAR + 4,   1, DRT_CTX_REG);
+	DUMP_REG_INIT(DUMP_REG_FSR,	CB_FSR,       1, DRT_CTX_REG);
+	DUMP_REG_INIT(DUMP_REG_FSYNR0,	CB_FSYNR0,    1, DRT_CTX_REG);
+	DUMP_REG_INIT(DUMP_REG_FSYNR1,	CB_FSYNR1,    1, DRT_CTX_REG);
+	DUMP_REG_INIT(DUMP_REG_TTBR0_0,	CB_TTBR0,     1, DRT_CTX_REG);
+	DUMP_REG_INIT(DUMP_REG_TTBR0_1,	CB_TTBR0 + 4, 0, DRT_CTX_REG);
+	DUMP_REG_INIT(DUMP_REG_TTBR1_0,	CB_TTBR1,     1, DRT_CTX_REG);
+	DUMP_REG_INIT(DUMP_REG_TTBR1_1,	CB_TTBR1 + 4, 0, DRT_CTX_REG);
+	DUMP_REG_INIT(DUMP_REG_SCTLR,	CB_SCTLR,     1, DRT_CTX_REG);
+	DUMP_REG_INIT(DUMP_REG_ACTLR,	CB_ACTLR,     1, DRT_CTX_REG);
+	DUMP_REG_INIT(DUMP_REG_PRRR,	CB_PRRR,      1, DRT_CTX_REG);
+	DUMP_REG_INIT(DUMP_REG_NMRR,	CB_NMRR,      1, DRT_CTX_REG);
+	DUMP_REG_INIT(DUMP_REG_CBAR_N,	CBAR,         1, DRT_GLOBAL_REG_N);
+	DUMP_REG_INIT(DUMP_REG_CBFRSYNRA_N, CBFRSYNRA, 1, DRT_GLOBAL_REG_N);
+}
+
+#ifdef CONFIG_IOMMU_PGTABLES_L2
+/* Set the page-table L2-redirect flag from a DOMAIN_ATTR value (inverted) */
+static void __do_set_redirect(struct iommu_domain *domain, void *data)
+{
+	struct msm_iommu_priv *priv;
+	int *no_redirect = data;
+
+	mutex_lock(&msm_iommu_lock);
+	priv = to_msm_priv(domain);
+	priv->pt.redirect = !(*no_redirect);
+	mutex_unlock(&msm_iommu_lock);
+}
+
+/* Read back the redirect flag as its inverted DOMAIN_ATTR representation */
+static void __do_get_redirect(struct iommu_domain *domain, void *data)
+{
+	struct msm_iommu_priv *priv;
+	int *no_redirect = data;
+
+	mutex_lock(&msm_iommu_lock);
+	priv = to_msm_priv(domain);
+	*no_redirect = !priv->pt.redirect;
+	mutex_unlock(&msm_iommu_lock);
+}
+
+#else
+
+/* Without L2 page-table redirect support these attributes are no-ops */
+static void __do_set_redirect(struct iommu_domain *domain, void *data)
+{
+}
+
+static void __do_get_redirect(struct iommu_domain *domain, void *data)
+{
+}
+#endif
+
+/* Set a domain attribute; only the coherent-HTW-disable knob is supported */
+static int msm_iommu_domain_set_attr(struct iommu_domain *domain,
+				enum iommu_attr attr, void *data)
+{
+	if (attr != DOMAIN_ATTR_QCOM_COHERENT_HTW_DISABLE)
+		return -EINVAL;
+
+	__do_set_redirect(domain, data);
+	return 0;
+}
+
+/* Get a domain attribute; only the coherent-HTW-disable knob is supported */
+static int msm_iommu_domain_get_attr(struct iommu_domain *domain,
+				enum iommu_attr attr, void *data)
+{
+	if (attr != DOMAIN_ATTR_QCOM_COHERENT_HTW_DISABLE)
+		return -EINVAL;
+
+	__do_get_redirect(domain, data);
+	return 0;
+}
+
+/*
+ * IOMMU API callbacks for this driver.  The commented-out entries are
+ * MSM-specific extensions not present in the upstream struct iommu_ops.
+ */
+static struct iommu_ops msm_iommu_ops = {
+	.domain_alloc = msm_iommu_domain_alloc,
+	.domain_free = msm_iommu_domain_free,
+	.attach_dev = msm_iommu_attach_dev,
+	.detach_dev = msm_iommu_detach_dev,
+	.map = msm_iommu_map,
+	.unmap = msm_iommu_unmap,
+	.map_sg = msm_iommu_map_sg,
+/*	.unmap_range = msm_iommu_unmap_range,*/
+	.iova_to_phys = msm_iommu_iova_to_phys,
+/*	.get_pt_base_addr = msm_iommu_get_pt_base_addr,*/
+	.pgsize_bitmap = MSM_IOMMU_PGSIZES,
+	.domain_set_attr = msm_iommu_domain_set_attr,
+	.domain_get_attr = msm_iommu_domain_get_attr,
+};
+
+/*
+ * Driver init: set up the page-table allocator, register with the
+ * platform bus and build the register-dump table used by fault handlers.
+ */
+static int __init msm_iommu_init(void)
+{
+	int ret;
+
+	msm_iommu_pagetable_init();
+
+	ret = bus_set_iommu(&platform_bus_type, &msm_iommu_ops);
+	if (ret)
+		return ret;
+
+	msm_iommu_build_dump_regs_table();
+
+	return 0;
+}
+
+subsys_initcall(msm_iommu_init);
+
+MODULE_LICENSE("GPL v2");
+MODULE_DESCRIPTION("MSM SMMU v2 Driver");
diff --git a/drivers/iommu/qcom/msm_iommu.c b/drivers/iommu/qcom/msm_iommu.c
new file mode 100644
index 0000000000000..08d8d10da2c45
--- /dev/null
+++ b/drivers/iommu/qcom/msm_iommu.c
@@ -0,0 +1,206 @@
+/* Copyright (c) 2013-2014, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+#include <linux/errno.h>
+#include <linux/err.h>
+#include <linux/list.h>
+#include <linux/mutex.h>
+#include <linux/platform_device.h>
+#include <linux/export.h>
+#include <linux/iommu.h>
+#include <linux/qcom_iommu.h>
+
+static DEFINE_MUTEX(iommu_list_lock);
+static LIST_HEAD(iommu_list);
+
+/* Inline-asm wrapper around the ARM MRC coprocessor-read instruction. */
+#define MRC(reg, processor, op1, crn, crm, op2) \
+__asm__ __volatile__ ( \
+" mrc " #processor "," #op1 ", %0," #crn "," #crm "," #op2 "\n" \
+: "=r" (reg))
+
+#define RCP15_PRRR(reg) MRC(reg, p15, 0, c10, c2, 0)
+#define RCP15_NMRR(reg) MRC(reg, p15, 0, c10, c2, 1)
+
+/*
+ * On LPAE, MAIR0/MAIR1 occupy the same CP15 encoding as PRRR/NMRR
+ * (c10, c2, opc2 0/1), so the same MRC operands are reused here.
+ */
+#define RCP15_MAIR0(reg) MRC(reg, p15, 0, c10, c2, 0)
+#define RCP15_MAIR1(reg) MRC(reg, p15, 0, c10, c2, 1)
+
+/* These values come from proc-v7-2level.S */
+#define PRRR_VALUE 0xff0a81a8
+#define NMRR_VALUE 0x40e040e0
+
+/* These values come from proc-v7-3level.S */
+#define MAIR0_VALUE 0xeeaa4400
+#define MAIR1_VALUE 0xff000004
+
+/* Access ops installed by the active IOMMU backend (v1 or secure). */
+static struct iommu_access_ops *iommu_access_ops;
+
+/* Dummy bus used to group secure-IOMMU devices. */
+struct bus_type msm_iommu_sec_bus_type = {
+ .name = "msm_iommu_sec_bus",
+};
+
+/* Install the access ops; retrieved later via msm_get_iommu_access_ops(). */
+void msm_set_iommu_access_ops(struct iommu_access_ops *ops)
+{
+ iommu_access_ops = ops;
+}
+
+/*
+ * Return the access ops previously installed with
+ * msm_set_iommu_access_ops().  BUG()s if called before any ops were set,
+ * since continuing with a NULL ops table would oops on first use anyway.
+ *
+ * Note: declared with (void) — the original empty parameter list was an
+ * old-style (non-prototype) declaration, which kernel style forbids.
+ */
+struct iommu_access_ops *msm_get_iommu_access_ops(void)
+{
+ BUG_ON(iommu_access_ops == NULL);
+ return iommu_access_ops;
+}
+EXPORT_SYMBOL(msm_get_iommu_access_ops);
+
+/* Add an IOMMU instance to the global list used by msm_iommu_get_ctx(). */
+void msm_iommu_add_drv(struct msm_iommu_drvdata *drv)
+{
+ mutex_lock(&iommu_list_lock);
+ list_add(&drv->list, &iommu_list);
+ mutex_unlock(&iommu_list_lock);
+}
+
+/* Remove an IOMMU instance from the global list (on driver teardown). */
+void msm_iommu_remove_drv(struct msm_iommu_drvdata *drv)
+{
+ mutex_lock(&iommu_list_lock);
+ list_del(&drv->list);
+ mutex_unlock(&iommu_list_lock);
+}
+
+/*
+ * device_find_child() match callback: true when the child context's
+ * label equals the requested name (@data).  Children without drvdata or
+ * a name (not yet probed) never match.
+ */
+static int find_iommu_ctx(struct device *dev, void *data)
+{
+ struct msm_iommu_ctx_drvdata *c;
+
+ c = dev_get_drvdata(dev);
+ if (!c || !c->name)
+ return 0;
+
+ return !strcmp(data, c->name);
+}
+
+/* Look up a context bank device by label under one IOMMU instance. */
+static struct device *find_context(struct device *dev, const char *name)
+{
+ return device_find_child(dev, (void *)name, find_iommu_ctx);
+}
+
+/*
+ * Resolve a context-bank device by its "label" property across all
+ * registered IOMMU instances.  Returns ERR_PTR(-EPROBE_DEFER) when the
+ * context is not (yet) available, so callers can retry after the IOMMU
+ * probes.
+ */
+struct device *msm_iommu_get_ctx(const char *ctx_name)
+{
+ struct msm_iommu_drvdata *drv;
+ struct device *dev = NULL;
+
+ mutex_lock(&iommu_list_lock);
+ list_for_each_entry(drv, &iommu_list, list) {
+ dev = find_context(drv->dev, ctx_name);
+ if (dev)
+ break;
+ }
+ mutex_unlock(&iommu_list_lock);
+
+ /*
+ * NOTE(review): the reference taken by device_find_child() is dropped
+ * here, yet dev is still dereferenced and returned below — presumably
+ * relying on context devices never being unbound; confirm this is safe.
+ */
+ put_device(dev);
+
+ if (!dev || !dev_get_drvdata(dev)) {
+ pr_debug("Could not find context <%s>\n", ctx_name);
+ dev = ERR_PTR(-EPROBE_DEFER);
+ }
+
+ return dev;
+}
+EXPORT_SYMBOL(msm_iommu_get_ctx);
+
+/*
+ * Memory-attribute register accessors.  Which set is compiled (PRRR/NMRR
+ * vs MAIR0/MAIR1, register-read vs hard-coded constant) depends on the
+ * CONFIG_ARM / CONFIG_ARM_LPAE / CONFIG_IOMMU_LPAE combination, as the
+ * comments in each branch explain.
+ */
+#ifdef CONFIG_ARM
+#ifdef CONFIG_IOMMU_LPAE
+#ifdef CONFIG_ARM_LPAE
+/*
+ * If CONFIG_ARM_LPAE AND CONFIG_IOMMU_LPAE are enabled we can use the MAIR
+ * register directly
+ */
+u32 msm_iommu_get_mair0(void)
+{
+ unsigned int mair0;
+
+ RCP15_MAIR0(mair0);
+ return mair0;
+}
+
+u32 msm_iommu_get_mair1(void)
+{
+ unsigned int mair1;
+
+ RCP15_MAIR1(mair1);
+ return mair1;
+}
+#else
+/*
+ * However, If CONFIG_ARM_LPAE is not enabled but CONFIG_IOMMU_LPAE is enabled
+ * we'll just use the hard coded values directly..
+ */
+u32 msm_iommu_get_mair0(void)
+{
+ return MAIR0_VALUE;
+}
+
+u32 msm_iommu_get_mair1(void)
+{
+ return MAIR1_VALUE;
+}
+#endif
+
+#else
+#ifdef CONFIG_ARM_LPAE
+/*
+ * If CONFIG_ARM_LPAE is enabled AND CONFIG_IOMMU_LPAE is disabled
+ * we must use the hardcoded values.
+ */
+u32 msm_iommu_get_prrr(void)
+{
+ return PRRR_VALUE;
+}
+
+u32 msm_iommu_get_nmrr(void)
+{
+ return NMRR_VALUE;
+}
+#else
+/*
+ * If both CONFIG_ARM_LPAE AND CONFIG_IOMMU_LPAE are disabled
+ * we can use the registers directly.
+ */
+/* Identical re-definitions of the macros above; benign under C rules. */
+#define RCP15_PRRR(reg) MRC(reg, p15, 0, c10, c2, 0)
+#define RCP15_NMRR(reg) MRC(reg, p15, 0, c10, c2, 1)
+
+u32 msm_iommu_get_prrr(void)
+{
+ u32 prrr;
+
+ RCP15_PRRR(prrr);
+ return prrr;
+}
+
+u32 msm_iommu_get_nmrr(void)
+{
+ u32 nmrr;
+
+ RCP15_NMRR(nmrr);
+ return nmrr;
+}
+#endif
+#endif
+#endif
+/*
+ * NOTE(review): on arm64 the ARMv7 PRRR/NMRR constants are returned
+ * verbatim — presumably only consumed by the v7S pagetable format;
+ * confirm against the pagetable code.
+ */
+#ifdef CONFIG_ARM64
+u32 msm_iommu_get_prrr(void)
+{
+ return PRRR_VALUE;
+}
+
+u32 msm_iommu_get_nmrr(void)
+{
+ return NMRR_VALUE;
+}
+#endif
diff --git a/drivers/iommu/qcom/msm_iommu_dev-v1.c b/drivers/iommu/qcom/msm_iommu_dev-v1.c
new file mode 100644
index 0000000000000..60ebc6eeafb6e
--- /dev/null
+++ b/drivers/iommu/qcom/msm_iommu_dev-v1.c
@@ -0,0 +1,708 @@
+/* Copyright (c) 2012-2014, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/io.h>
+#include <linux/clk.h>
+#include <linux/iommu.h>
+#include <linux/interrupt.h>
+#ifdef CONFIG_MSM_BUS_SCALING
+#include <linux/msm-bus.h>
+#endif
+#include <linux/err.h>
+#include <linux/slab.h>
+#include <linux/of.h>
+#include <linux/of_address.h>
+#include <linux/of_device.h>
+
+#include "msm_iommu_hw-v1.h"
+#include <linux/qcom_iommu.h>
+#include "msm_iommu_perfmon.h"
+#include <linux/qcom_scm.h>
+
+/* Forward declaration: used by of_platform_populate() in probe below. */
+static const struct of_device_id msm_iommu_ctx_match_table[];
+
+/* DT property names for the BFB register/value tables; the LPAE build
+ * reads a distinct pair of properties. */
+#ifdef CONFIG_IOMMU_LPAE
+static const char *BFB_REG_NODE_NAME = "qcom,iommu-lpae-bfb-regs";
+static const char *BFB_DATA_NODE_NAME = "qcom,iommu-lpae-bfb-data";
+#else
+static const char *BFB_REG_NODE_NAME = "qcom,iommu-bfb-regs";
+static const char *BFB_DATA_NODE_NAME = "qcom,iommu-bfb-data";
+#endif
+
+/*
+ * Parse the optional BFB (prefetch/performance) register settings from DT
+ * into drvdata->bfb_settings.  Both properties must be present together
+ * and have equal length; absence of both is not an error.  nreg/nval are
+ * property lengths in BYTES (as returned by of_get_property()), hence the
+ * divisions by element size below.
+ */
+static int msm_iommu_parse_bfb_settings(struct platform_device *pdev,
+ struct msm_iommu_drvdata *drvdata)
+{
+ struct msm_iommu_bfb_settings *bfb_settings;
+ u32 nreg, nval;
+ int ret;
+
+ /*
+ * It is not valid for a device to have the BFB_REG_NODE_NAME
+ * property but not the BFB_DATA_NODE_NAME property, and vice versa.
+ */
+ if (!of_get_property(pdev->dev.of_node, BFB_REG_NODE_NAME, &nreg)) {
+ if (of_get_property(pdev->dev.of_node, BFB_DATA_NODE_NAME,
+ &nval))
+ return -EINVAL;
+ return 0;
+ }
+
+ if (!of_get_property(pdev->dev.of_node, BFB_DATA_NODE_NAME, &nval))
+ return -EINVAL;
+
+ /* Byte lengths must fit the fixed-size arrays in bfb_settings. */
+ if (nreg >= sizeof(bfb_settings->regs))
+ return -EINVAL;
+
+ if (nval >= sizeof(bfb_settings->data))
+ return -EINVAL;
+
+ if (nval != nreg)
+ return -EINVAL;
+
+ bfb_settings = devm_kzalloc(&pdev->dev, sizeof(*bfb_settings),
+ GFP_KERNEL);
+ if (!bfb_settings)
+ return -ENOMEM;
+
+ ret = of_property_read_u32_array(pdev->dev.of_node,
+ BFB_REG_NODE_NAME,
+ bfb_settings->regs,
+ nreg / sizeof(*bfb_settings->regs));
+ if (ret)
+ return ret;
+
+ ret = of_property_read_u32_array(pdev->dev.of_node,
+ BFB_DATA_NODE_NAME,
+ bfb_settings->data,
+ nval / sizeof(*bfb_settings->data));
+ if (ret)
+ return ret;
+
+ bfb_settings->length = nreg / sizeof(*bfb_settings->regs);
+
+ drvdata->bfb_settings = bfb_settings;
+
+ return 0;
+}
+
+/*
+ * Register a bus-scaling client for this IOMMU if the DT node requests
+ * one ("qcom,msm-bus,name" present).  Returns 0 when bus scaling is not
+ * configured for the node or on success; -EINVAL on registration failure.
+ */
+static int __get_bus_vote_client(struct platform_device *pdev,
+ struct msm_iommu_drvdata *drvdata)
+{
+ int ret = 0;
+#ifdef CONFIG_MSM_BUS_SCALING
+ struct msm_bus_scale_pdata *bs_table;
+ const char *dummy;
+
+ /* Check whether bus scaling has been specified for this node */
+ ret = of_property_read_string(pdev->dev.of_node, "qcom,msm-bus,name",
+ &dummy);
+ if (ret)
+ return 0;
+
+ bs_table = msm_bus_cl_get_pdata(pdev);
+ if (bs_table) {
+ drvdata->bus_client = msm_bus_scale_register_client(bs_table);
+ /*
+ * msm_bus_scale_register_client() returns a non-zero handle
+ * on success and 0 on failure; it never returns an ERR_PTR.
+ * The previous IS_ERR(&drvdata->bus_client) check took the
+ * address of the member — never an error value — so failures
+ * were silently ignored.
+ */
+ if (!drvdata->bus_client) {
+ pr_err("%s(): Bus client register failed.\n", __func__);
+ ret = -EINVAL;
+ }
+ }
+#endif
+ return ret;
+}
+
+/* Release the bus-scaling client registered by __get_bus_vote_client(). */
+static void __put_bus_vote_client(struct msm_iommu_drvdata *drvdata)
+{
+#ifdef CONFIG_MSM_BUS_SCALING
+ msm_bus_scale_unregister_client(drvdata->bus_client);
+ drvdata->bus_client = 0;
+#endif
+}
+
+/*
+ * CONFIG_IOMMU_NON_SECURE allows us to override the secure
+ * designation of SMMUs in device tree. With this config enabled
+ * all SMMUs will be programmed by this driver.
+ */
+#ifdef CONFIG_IOMMU_NON_SECURE
+/* Non-secure build: leave sec_id at its initialised value (-1). */
+static inline void get_secure_id(struct device_node *node,
+ struct msm_iommu_drvdata *drvdata)
+{
+}
+
+/* Non-secure build: force every context bank to non-secure. */
+static inline void get_secure_ctx(struct device_node *node,
+ struct msm_iommu_drvdata *iommu_drvdata,
+ struct msm_iommu_ctx_drvdata *ctx_drvdata)
+{
+ ctx_drvdata->secure_context = 0;
+}
+#else
+/*
+ * Read the secure-world identifier for this SMMU from DT, but only when
+ * SCM calls are actually available on this platform.
+ */
+static void get_secure_id(struct device_node *node,
+ struct msm_iommu_drvdata *drvdata)
+{
+ if (msm_iommu_get_scm_call_avail())
+ of_property_read_u32(node, "qcom,iommu-secure-id",
+ &drvdata->sec_id);
+}
+
+/* Mark the context secure if DT says so and SCM calls are available. */
+static void get_secure_ctx(struct device_node *node,
+ struct msm_iommu_drvdata *iommu_drvdata,
+ struct msm_iommu_ctx_drvdata *ctx_drvdata)
+{
+ u32 secure_ctx = 0;
+
+ if (msm_iommu_get_scm_call_avail())
+ secure_ctx = of_property_read_bool(node, "qcom,secure-context");
+
+ ctx_drvdata->secure_context = secure_ctx;
+}
+#endif
+
+/*
+ * Parse the per-IOMMU DT properties: bus-scaling client, BFB settings,
+ * context-bank count (one per available child node), label, secure id and
+ * halt capability.  On success the instance is added to the global list;
+ * on failure the bus client is released again.
+ */
+static int msm_iommu_parse_dt(struct platform_device *pdev,
+ struct msm_iommu_drvdata *drvdata)
+{
+ struct device_node *child;
+ int ret;
+
+ drvdata->dev = &pdev->dev;
+
+ ret = __get_bus_vote_client(pdev, drvdata);
+ if (ret)
+ goto fail;
+
+ ret = msm_iommu_parse_bfb_settings(pdev, drvdata);
+ if (ret)
+ goto fail;
+
+ /* Each available child node describes one context bank. */
+ for_each_available_child_of_node(pdev->dev.of_node, child)
+ drvdata->ncb++;
+
+ ret = of_property_read_string(pdev->dev.of_node, "label",
+ &drvdata->name);
+ if (ret)
+ goto fail;
+
+ /* -1 means "no secure id"; get_secure_id() may overwrite it. */
+ drvdata->sec_id = -1;
+ get_secure_id(pdev->dev.of_node, drvdata);
+
+ drvdata->halt_enabled = of_property_read_bool(pdev->dev.of_node,
+ "qcom,iommu-enable-halt");
+
+ msm_iommu_add_drv(drvdata);
+
+ return 0;
+
+fail:
+ __put_bus_vote_client(drvdata);
+ return ret;
+}
+
+/*
+ * Parse the performance-monitor DT properties (event IRQ, group/counter
+ * counts, supported event classes) into @pmon_info.  A missing event IRQ
+ * disables pmon (evt_irq = -1); other missing properties are errors.
+ */
+static int msm_iommu_pmon_parse_dt(struct platform_device *pdev,
+ struct iommu_pmon *pmon_info)
+{
+ struct device *dev = &pdev->dev;
+ struct device_node *np = pdev->dev.of_node;
+ unsigned int cls_prop_size;
+ int ret, irq;
+
+ irq = platform_get_irq(pdev, 0);
+ if (irq < 0 && irq == -EPROBE_DEFER)
+ return -EPROBE_DEFER;
+ /*
+ * NOTE(review): when irq == 0 this returns 0 (success) with
+ * evt_irq = -1 — presumably intentional "no pmon IRQ"; confirm
+ * callers treat that as pmon-disabled rather than pmon-ready.
+ */
+ if (irq <= 0) {
+ pmon_info->iommu.evt_irq = -1;
+ return irq;
+ }
+
+ pmon_info->iommu.evt_irq = irq;
+
+ ret = of_property_read_u32(np, "qcom,iommu-pmu-ngroups",
+ &pmon_info->num_groups);
+ if (ret) {
+ dev_err(dev, "Error reading qcom,iommu-pmu-ngroups\n");
+ return ret;
+ }
+
+ ret = of_property_read_u32(np, "qcom,iommu-pmu-ncounters",
+ &pmon_info->num_counters);
+ if (ret) {
+ dev_err(dev, "Error reading qcom,iommu-pmu-ncounters\n");
+ return ret;
+ }
+
+ /* cls_prop_size is the property length in bytes. */
+ if (!of_get_property(np, "qcom,iommu-pmu-event-classes",
+ &cls_prop_size)) {
+ dev_err(dev, "Error reading qcom,iommu-pmu-event-classes\n");
+ return -EINVAL;
+ }
+
+ pmon_info->event_cls_supported = devm_kzalloc(dev, cls_prop_size,
+ GFP_KERNEL);
+ if (!pmon_info->event_cls_supported) {
+ dev_err(dev, "Unable to get memory for event class array\n");
+ return -ENOMEM;
+ }
+
+ pmon_info->nevent_cls_supported = cls_prop_size / sizeof(u32);
+
+ ret = of_property_read_u32_array(np, "qcom,iommu-pmu-event-classes",
+ pmon_info->event_cls_supported,
+ pmon_info->nevent_cls_supported);
+ if (ret) {
+ dev_err(dev, "Error reading qcom,iommu-pmu-event-classes\n");
+ return ret;
+ }
+
+ return 0;
+}
+
+#define SCM_SVC_MP 0xc
+#define MAXIMUM_VIRT_SIZE (300 * SZ_1M)
+/* Pack an SCM feature version as major[31:22] minor[21:12] patch[11:0]. */
+#define MAKE_VERSION(major, minor, patch) \
+ (((major & 0x3FF) << 22) | ((minor & 0x3FF) << 12) | (patch & 0xFFF))
+
+/*
+ * One-time allocation and secure-world initialisation of the secure
+ * pagetable pool.  Queries the required size via SCM, allocates it with
+ * no kernel mapping (the buffer belongs to the secure world), and hands
+ * it to qcom_scm_iommu_secure_ptbl_init().  Subsequent calls are no-ops
+ * once a pool has been successfully set up.
+ */
+static int msm_iommu_sec_ptbl_init(struct device *dev)
+{
+ int psize[2] = {0, 0};
+ unsigned int spare = 0;
+ int ret;
+ int version;
+ void *cpu_addr;
+ dma_addr_t paddr;
+ DEFINE_DMA_ATTRS(attrs);
+ /* NOTE(review): not thread-safe; presumably only called from probe. */
+ static bool allocated = false;
+
+ if (allocated)
+ return 0;
+
+ version = qcom_scm_get_feat_version(SCM_SVC_MP);
+
+ /* Firmware >= 1.1.1 lets us cap the secure virtual address space. */
+ if (version >= MAKE_VERSION(1, 1, 1)) {
+ ret = qcom_scm_iommu_set_cp_pool_size(MAXIMUM_VIRT_SIZE, 0);
+ if (ret) {
+ dev_err(dev, "failed setting max virtual size (%d)\n",
+ ret);
+ return ret;
+ }
+ }
+
+ ret = qcom_scm_iommu_secure_ptbl_size(spare, psize);
+ if (ret) {
+ dev_err(dev, "failed to get iommu secure pgtable size (%d)\n",
+ ret);
+ return ret;
+ }
+
+ /*
+ * psize[1] carries the firmware-side error code.  The old message
+ * printed "ret", which is known to be 0 here; report the actual
+ * error value instead.
+ */
+ if (psize[1]) {
+ dev_err(dev, "iommu secure pgtable size query failed (%d)\n",
+ psize[1]);
+ return psize[1];
+ }
+
+ dev_info(dev, "iommu sec: pgtable size: %d\n", psize[0]);
+
+ dma_set_attr(DMA_ATTR_NO_KERNEL_MAPPING, &attrs);
+
+ cpu_addr = dma_alloc_attrs(dev, psize[0], &paddr, GFP_KERNEL, &attrs);
+ if (!cpu_addr) {
+ dev_err(dev, "failed to allocate %d bytes for pgtable\n",
+ psize[0]);
+ return -ENOMEM;
+ }
+
+ ret = qcom_scm_iommu_secure_ptbl_init(paddr, psize[0], spare);
+ if (ret) {
+ dev_err(dev, "failed to init iommu pgtable (%d)\n", ret);
+ goto free_mem;
+ }
+
+ allocated = true;
+
+ return 0;
+
+free_mem:
+ dma_free_attrs(dev, psize[0], cpu_addr, paddr, &attrs);
+ return ret;
+}
+
+/*
+ * Probe one SMMU instance: map registers, acquire clocks, parse DT,
+ * optionally set up the secure pagetable pool and the performance
+ * monitor, request the global fault IRQs, then populate the context-bank
+ * child devices.  pmon failures are non-fatal; IRQ request failures are
+ * logged but probing continues.
+ */
+static int msm_iommu_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct device_node *np = pdev->dev.of_node;
+ struct iommu_pmon *pmon_info;
+ struct msm_iommu_drvdata *drvdata;
+ struct resource *res;
+ int ret;
+ int global_cfg_irq, global_client_irq;
+ u32 temp;
+ unsigned long rate;
+
+ drvdata = devm_kzalloc(dev, sizeof(*drvdata), GFP_KERNEL);
+ if (!drvdata)
+ return -ENOMEM;
+
+ drvdata->dev = dev;
+
+ res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "iommu_base");
+ drvdata->base = devm_ioremap_resource(dev, res);
+ if (IS_ERR(drvdata->base))
+ return PTR_ERR(drvdata->base);
+
+ drvdata->glb_base = drvdata->base;
+ drvdata->phys_base = res->start;
+
+ /* Optional region: absence just leaves smmu_local_base NULL. */
+ res = platform_get_resource_byname(pdev, IORESOURCE_MEM,
+ "smmu_local_base");
+ drvdata->smmu_local_base = devm_ioremap_resource(dev, res);
+ if (IS_ERR(drvdata->smmu_local_base) &&
+ PTR_ERR(drvdata->smmu_local_base) != -EPROBE_DEFER)
+ drvdata->smmu_local_base = NULL;
+
+ if (of_device_is_compatible(np, "qcom,msm-mmu-500"))
+ drvdata->model = MMU_500;
+
+ drvdata->iface = devm_clk_get(dev, "iface_clk");
+ if (IS_ERR(drvdata->iface))
+ return PTR_ERR(drvdata->iface);
+
+ drvdata->core = devm_clk_get(dev, "core_clk");
+ if (IS_ERR(drvdata->core))
+ return PTR_ERR(drvdata->core);
+
+ /* Context banks start at an optional DT offset, default 0x8000. */
+ if (!of_property_read_u32(np, "qcom,cb-base-offset", &temp))
+ drvdata->cb_base = drvdata->base + temp;
+ else
+ drvdata->cb_base = drvdata->base + 0x8000;
+
+ /* Ensure the core clock has a rate before it is ever enabled. */
+ rate = clk_get_rate(drvdata->core);
+ if (!rate) {
+ rate = clk_round_rate(drvdata->core, 1000);
+ clk_set_rate(drvdata->core, rate);
+ }
+
+ dev_info(&pdev->dev, "iface: %lu, core: %lu\n",
+ clk_get_rate(drvdata->iface), clk_get_rate(drvdata->core));
+
+ ret = msm_iommu_parse_dt(pdev, drvdata);
+ if (ret)
+ return ret;
+
+ dev_info(dev, "device %s (model: %d) mapped at %p, with %d ctx banks\n",
+ drvdata->name, drvdata->model, drvdata->base, drvdata->ncb);
+
+ /* A secure id was found in DT: the secure pool must exist first. */
+ if (drvdata->sec_id != -1) {
+ ret = msm_iommu_sec_ptbl_init(dev);
+ if (ret)
+ return ret;
+ }
+
+ platform_set_drvdata(pdev, drvdata);
+
+ pmon_info = msm_iommu_pm_alloc(dev);
+ if (pmon_info) {
+ ret = msm_iommu_pmon_parse_dt(pdev, pmon_info);
+ if (ret) {
+ msm_iommu_pm_free(dev);
+ dev_info(dev, "%s: pmon not available\n",
+ drvdata->name);
+ } else {
+ pmon_info->iommu.base = drvdata->base;
+ pmon_info->iommu.ops = msm_get_iommu_access_ops();
+ pmon_info->iommu.hw_ops = iommu_pm_get_hw_ops_v1();
+ pmon_info->iommu.iommu_name = drvdata->name;
+ ret = msm_iommu_pm_iommu_register(pmon_info);
+ if (ret) {
+ dev_err(dev, "%s iommu register fail\n",
+ drvdata->name);
+ msm_iommu_pm_free(dev);
+ } else {
+ dev_dbg(dev, "%s iommu registered for pmon\n",
+ pmon_info->iommu.iommu_name);
+ }
+ }
+ }
+
+ global_cfg_irq = platform_get_irq_byname(pdev, "global_cfg_NS_irq");
+ if (global_cfg_irq < 0 && global_cfg_irq == -EPROBE_DEFER)
+ return -EPROBE_DEFER;
+ if (global_cfg_irq > 0) {
+ ret = devm_request_threaded_irq(dev, global_cfg_irq,
+ NULL,
+ msm_iommu_global_fault_handler,
+ IRQF_ONESHOT | IRQF_SHARED |
+ IRQF_TRIGGER_RISING,
+ "msm_iommu_global_cfg_irq",
+ pdev);
+ if (ret < 0)
+ dev_err(dev, "Request Global CFG IRQ %d failed with ret=%d\n",
+ global_cfg_irq, ret);
+ }
+
+ global_client_irq =
+ platform_get_irq_byname(pdev, "global_client_NS_irq");
+ if (global_client_irq < 0 && global_client_irq == -EPROBE_DEFER)
+ return -EPROBE_DEFER;
+
+ if (global_client_irq > 0) {
+ ret = devm_request_threaded_irq(dev, global_client_irq,
+ NULL,
+ msm_iommu_global_fault_handler,
+ IRQF_ONESHOT | IRQF_SHARED |
+ IRQF_TRIGGER_RISING,
+ "msm_iommu_global_client_irq",
+ pdev);
+ if (ret < 0)
+ dev_err(dev, "Request Global Client IRQ %d failed with ret=%d\n",
+ global_client_irq, ret);
+ }
+
+ /* Create one child platform device per context-bank DT node. */
+ ret = of_platform_populate(np, msm_iommu_ctx_match_table, NULL, dev);
+ if (ret)
+ dev_err(dev, "Failed to create iommu context device\n");
+
+ return ret;
+}
+
+/*
+ * Tear down one SMMU instance: unregister pmon, release the bus client
+ * and drop the instance from the global lookup list.
+ */
+static int msm_iommu_remove(struct platform_device *pdev)
+{
+ struct msm_iommu_drvdata *drv;
+
+ msm_iommu_pm_iommu_unregister(&pdev->dev);
+ msm_iommu_pm_free(&pdev->dev);
+
+ drv = platform_get_drvdata(pdev);
+ if (drv) {
+ __put_bus_vote_client(drv);
+ msm_iommu_remove_drv(drv);
+ platform_set_drvdata(pdev, NULL);
+ }
+
+ return 0;
+}
+
+/*
+ * Parse one context-bank child node: request the (secure or non-secure)
+ * fault IRQ, derive the context-bank number from the MMIO offsets, and
+ * read the stream-ID list plus optional SID mask.  nsid / n_sid_mask are
+ * property lengths in BYTES.  The SID mask is optional, but when present
+ * its length must match the SID list.
+ */
+static int msm_iommu_ctx_parse_dt(struct platform_device *pdev,
+ struct msm_iommu_ctx_drvdata *ctx_drvdata)
+{
+ struct resource *r, rp;
+ int irq = 0, ret = 0;
+ struct msm_iommu_drvdata *drvdata;
+ u32 nsid;
+ u32 n_sid_mask;
+ unsigned long cb_offset;
+
+ drvdata = dev_get_drvdata(pdev->dev.parent);
+
+ get_secure_ctx(pdev->dev.of_node, drvdata, ctx_drvdata);
+
+ /* Secure contexts use IRQ index 1, non-secure use index 0. */
+ if (ctx_drvdata->secure_context) {
+ irq = platform_get_irq(pdev, 1);
+ if (irq < 0 && irq == -EPROBE_DEFER)
+ return -EPROBE_DEFER;
+
+ if (irq > 0) {
+ ret = devm_request_threaded_irq(&pdev->dev, irq, NULL,
+ msm_iommu_secure_fault_handler_v2,
+ IRQF_ONESHOT | IRQF_SHARED,
+ "msm_iommu_secure_irq", pdev);
+ if (ret) {
+ pr_err("Request IRQ %d failed with ret=%d\n",
+ irq, ret);
+ return ret;
+ }
+ }
+ } else {
+ irq = platform_get_irq(pdev, 0);
+ if (irq < 0 && irq == -EPROBE_DEFER)
+ return -EPROBE_DEFER;
+
+ if (irq > 0) {
+ ret = devm_request_threaded_irq(&pdev->dev, irq, NULL,
+ msm_iommu_fault_handler_v2,
+ IRQF_ONESHOT | IRQF_SHARED,
+ "msm_iommu_nonsecure_irq", pdev);
+ if (ret) {
+ pr_err("Request IRQ %d failed with ret=%d\n",
+ irq, ret);
+ goto out;
+ }
+ }
+ }
+
+ r = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ if (!r) {
+ ret = -EINVAL;
+ goto out;
+ }
+
+ ret = of_address_to_resource(pdev->dev.parent->of_node, 0, &rp);
+ if (ret)
+ goto out;
+
+ /* Calculate the context bank number using the base addresses.
+ * Typically CB0 base address is 0x8000 pages away if the number
+ * of CBs are <=8. So, assume the offset 0x8000 until mentioned
+ * explicitely.
+ */
+ cb_offset = drvdata->cb_base - drvdata->base;
+ ctx_drvdata->num = (r->start - rp.start - cb_offset) >> CTX_SHIFT;
+
+ /* Fall back to the device name when no "label" is given. */
+ if (of_property_read_string(pdev->dev.of_node, "label",
+ &ctx_drvdata->name))
+ ctx_drvdata->name = dev_name(&pdev->dev);
+
+ if (!of_get_property(pdev->dev.of_node, "qcom,iommu-ctx-sids", &nsid)) {
+ ret = -EINVAL;
+ goto out;
+ }
+
+ if (nsid >= sizeof(ctx_drvdata->sids)) {
+ ret = -EINVAL;
+ goto out;
+ }
+
+ if (of_property_read_u32_array(pdev->dev.of_node, "qcom,iommu-ctx-sids",
+ ctx_drvdata->sids,
+ nsid / sizeof(*ctx_drvdata->sids))) {
+ ret = -EINVAL;
+ goto out;
+ }
+
+ ctx_drvdata->nsid = nsid;
+ /* -1 means "ASID not yet assigned". */
+ ctx_drvdata->asid = -1;
+
+ /* SID mask is optional: absent means an all-zero mask. */
+ if (!of_get_property(pdev->dev.of_node, "qcom,iommu-sid-mask",
+ &n_sid_mask)) {
+ memset(ctx_drvdata->sid_mask, 0, MAX_NUM_SMR);
+ goto out;
+ }
+
+ if (n_sid_mask != nsid) {
+ ret = -EINVAL;
+ goto out;
+ }
+
+ if (of_property_read_u32_array(pdev->dev.of_node, "qcom,iommu-sid-mask",
+ ctx_drvdata->sid_mask,
+ n_sid_mask / sizeof(*ctx_drvdata->sid_mask))) {
+ ret = -EINVAL;
+ goto out;
+ }
+
+ ctx_drvdata->n_sid_mask = n_sid_mask;
+
+out:
+ return ret;
+}
+
+/*
+ * Probe one context-bank child device.  Requires a parent (the SMMU
+ * instance) since the bank number and secure state derive from it.
+ */
+static int msm_iommu_ctx_probe(struct platform_device *pdev)
+{
+ struct msm_iommu_ctx_drvdata *ctx_drvdata;
+ int ret;
+
+ if (!pdev->dev.parent)
+ return -EINVAL;
+
+ ctx_drvdata = devm_kzalloc(&pdev->dev, sizeof(*ctx_drvdata),
+ GFP_KERNEL);
+ if (!ctx_drvdata)
+ return -ENOMEM;
+
+ ctx_drvdata->pdev = pdev;
+ INIT_LIST_HEAD(&ctx_drvdata->attached_elm);
+
+ ret = msm_iommu_ctx_parse_dt(pdev, ctx_drvdata);
+ if (ret)
+ return ret;
+
+ platform_set_drvdata(pdev, ctx_drvdata);
+
+ dev_info(&pdev->dev, "context %s using bank %d\n",
+ ctx_drvdata->name, ctx_drvdata->num);
+
+ return 0;
+}
+
+/* Context-bank teardown: drvdata is devm-allocated, so just clear it. */
+static int msm_iommu_ctx_remove(struct platform_device *pdev)
+{
+ platform_set_drvdata(pdev, NULL);
+
+ return 0;
+}
+
+/* Compatible strings for the top-level SMMU instances. */
+static const struct of_device_id msm_iommu_match_table[] = {
+ { .compatible = "qcom,msm-smmu-v1", },
+ { .compatible = "qcom,msm-smmu-v2", },
+ {}
+};
+
+static struct platform_driver msm_iommu_driver = {
+ .driver = {
+ .name = "msm_iommu",
+ .of_match_table = msm_iommu_match_table,
+ },
+ .probe = msm_iommu_probe,
+ .remove = msm_iommu_remove,
+};
+
+/* Compatible strings for the context-bank child devices. */
+static const struct of_device_id msm_iommu_ctx_match_table[] = {
+ { .compatible = "qcom,msm-smmu-v1-ctx", },
+ { .compatible = "qcom,msm-smmu-v2-ctx", },
+ {}
+};
+
+static struct platform_driver msm_iommu_ctx_driver = {
+ .driver = {
+ .name = "msm_iommu_ctx",
+ .of_match_table = msm_iommu_ctx_match_table,
+ },
+ .probe = msm_iommu_ctx_probe,
+ .remove = msm_iommu_ctx_remove,
+};
+
+/*
+ * Module init: probe SCM availability, install the v1 access ops, then
+ * register the SMMU driver before the context-bank driver (contexts are
+ * populated as children during SMMU probe).  Unwinds on partial failure.
+ */
+static int __init msm_iommu_driver_init(void)
+{
+ int ret;
+
+ msm_iommu_check_scm_call_avail();
+ msm_set_iommu_access_ops(&iommu_access_ops_v1);
+ msm_iommu_sec_set_access_ops(&iommu_access_ops_v1);
+
+ ret = platform_driver_register(&msm_iommu_driver);
+ if (ret) {
+ pr_err("Failed to register IOMMU driver\n");
+ return ret;
+ }
+
+ ret = platform_driver_register(&msm_iommu_ctx_driver);
+ if (ret) {
+ pr_err("Failed to register IOMMU context driver\n");
+ platform_driver_unregister(&msm_iommu_driver);
+ return ret;
+ }
+
+ return 0;
+}
+
+/* Unregister in reverse order of registration. */
+static void __exit msm_iommu_driver_exit(void)
+{
+ platform_driver_unregister(&msm_iommu_ctx_driver);
+ platform_driver_unregister(&msm_iommu_driver);
+}
+
+subsys_initcall(msm_iommu_driver_init);
+module_exit(msm_iommu_driver_exit);
+
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/iommu/qcom/msm_iommu_hw-v1.h b/drivers/iommu/qcom/msm_iommu_hw-v1.h
new file mode 100644
index 0000000000000..53e2f4874adb5
--- /dev/null
+++ b/drivers/iommu/qcom/msm_iommu_hw-v1.h
@@ -0,0 +1,2320 @@
+/* Copyright (c) 2012-2014, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef __ARCH_ARM_MACH_MSM_IOMMU_HW_V2_H
+#define __ARCH_ARM_MACH_MSM_IOMMU_HW_V2_H
+
+#define CTX_SHIFT 12
+
+#define CTX_REG(reg, base, ctx) \
+ ((base) + (reg) + ((ctx) << CTX_SHIFT))
+#define GLB_REG(reg, base) \
+ ((base) + (reg))
+#define GLB_REG_N(b, n, r) GLB_REG(b, ((r) + ((n) << 2)))
+#define GLB_FIELD(b, r) ((b) + (r))
+#define GLB_CTX_FIELD(b, c, r) (GLB_FIELD(b, r) + ((c) << CTX_SHIFT))
+#define GLB_FIELD_N(b, n, r) (GLB_FIELD(b, r) + ((n) << 2))
+
+
+#define GET_GLOBAL_REG(reg, base) (readl_relaxed(GLB_REG(reg, base)))
+#define GET_GLOBAL_REG_Q(reg, base) (readq_relaxed(GLB_REG(reg, base)))
+#define GET_CTX_REG(reg, base, ctx) (readl_relaxed(CTX_REG(reg, base, ctx)))
+#define GET_CTX_REG_Q(reg, base, ctx) (readq_relaxed(CTX_REG(reg, base, ctx)))
+
+#define SET_GLOBAL_REG(reg, base, val) writel_relaxed((val), GLB_REG(reg, base))
+#define SET_GLOBAL_REG_Q(reg, base, val) \
+ (writeq_relaxed((val), GLB_REG(reg, base)))
+
+#define SET_CTX_REG(reg, base, ctx, val) \
+ writel_relaxed((val), (CTX_REG(reg, base, ctx)))
+#define SET_CTX_REG_Q(reg, base, ctx, val) \
+ writeq_relaxed((val), CTX_REG(reg, base, ctx))
+
+/* Wrappers for numbered registers */
+#define SET_GLOBAL_REG_N(b, n, r, v) writel_relaxed(((v)), GLB_REG_N(b, n, r))
+#define GET_GLOBAL_REG_N(b, n, r) (readl_relaxed(GLB_REG_N(b, n, r)))
+
+/* Field wrappers */
+#define GET_GLOBAL_FIELD(b, r, F) \
+ GET_FIELD(GLB_FIELD(b, r), r##_##F##_MASK, r##_##F##_SHIFT)
+#define GET_CONTEXT_FIELD(b, c, r, F) \
+ GET_FIELD(GLB_CTX_FIELD(b, c, r), \
+ r##_##F##_MASK, r##_##F##_SHIFT)
+#define GET_CONTEXT_FIELD_Q(b, c, r, F) \
+ GET_FIELD_Q(GLB_CTX_FIELD(b, c, r), \
+ r##_##F##_MASK, r##_##F##_SHIFT)
+
+#define SET_GLOBAL_FIELD(b, r, F, v) \
+ SET_FIELD(GLB_FIELD(b, r), r##_##F##_MASK, r##_##F##_SHIFT, (v))
+#define SET_CONTEXT_FIELD(b, c, r, F, v) \
+ SET_FIELD(GLB_CTX_FIELD(b, c, r), \
+ r##_##F##_MASK, r##_##F##_SHIFT, (v))
+#define SET_CONTEXT_FIELD_Q(b, c, r, F, v) \
+ SET_FIELD_Q(GLB_CTX_FIELD(b, c, r), \
+ r##_##F##_MASK, r##_##F##_SHIFT, (v))
+
+/* Wrappers for numbered field registers */
+#define SET_GLOBAL_FIELD_N(b, n, r, F, v) \
+ SET_FIELD(GLB_FIELD_N(b, n, r), r##_##F##_MASK, r##_##F##_SHIFT, v)
+#define GET_GLOBAL_FIELD_N(b, n, r, F) \
+ GET_FIELD(GLB_FIELD_N(b, n, r), r##_##F##_MASK, r##_##F##_SHIFT)
+
+#define GET_FIELD(addr, mask, shift) ((readl_relaxed(addr) >> (shift)) & (mask))
+#define GET_FIELD_Q(addr, mask, shift) \
+ ((readq_relaxed(addr) >> (shift)) & (mask))
+
+#define SET_FIELD(addr, mask, shift, v) \
+do { \
+ int t = readl_relaxed(addr); \
+ writel_relaxed((t & ~((mask) << (shift))) + (((v) & \
+ (mask)) << (shift)), addr); \
+} while (0)
+
+#define SET_FIELD_Q(addr, mask, shift, v) \
+do { \
+ u64 t = readq_relaxed(addr); \
+ writeq_relaxed((t & ~(((u64) mask) << (shift))) + (((v) & \
+ ((u64) mask)) << (shift)), addr); \
+} while (0)
+
+
+/* Global register space 0 setters / getters */
+#define SET_CR0(b, v) SET_GLOBAL_REG(CR0, (b), (v))
+#define SET_SCR1(b, v) SET_GLOBAL_REG(SCR1, (b), (v))
+#define SET_CR2(b, v) SET_GLOBAL_REG(CR2, (b), (v))
+#define SET_ACR(b, v) SET_GLOBAL_REG(ACR, (b), (v))
+#define SET_IDR0(b, N, v) SET_GLOBAL_REG(IDR0, (b), (v))
+#define SET_IDR1(b, N, v) SET_GLOBAL_REG(IDR1, (b), (v))
+#define SET_IDR2(b, N, v) SET_GLOBAL_REG(IDR2, (b), (v))
+#define SET_IDR7(b, N, v) SET_GLOBAL_REG(IDR7, (b), (v))
+#define SET_GFAR(b, v) SET_GLOBAL_REG_Q(GFAR, (b), (v))
+#define SET_GFSR(b, v) SET_GLOBAL_REG(GFSR, (b), (v))
+#define SET_GFSRRESTORE(b, v) SET_GLOBAL_REG(GFSRRESTORE, (b), (v))
+#define SET_GFSYNR0(b, v) SET_GLOBAL_REG(GFSYNR0, (b), (v))
+#define SET_GFSYNR1(b, v) SET_GLOBAL_REG(GFSYNR1, (b), (v))
+#define SET_GFSYNR2(b, v) SET_GLOBAL_REG(GFSYNR2, (b), (v))
+#define SET_TLBIVMID(b, v) SET_GLOBAL_REG(TLBIVMID, (b), (v))
+#define SET_TLBIALLNSNH(b, v) SET_GLOBAL_REG(TLBIALLNSNH, (b), (v))
+#define SET_TLBIALLH(b, v) SET_GLOBAL_REG(TLBIALLH, (b), (v))
+#define SET_TLBGSYNC(b, v) SET_GLOBAL_REG(TLBGSYNC, (b), (v))
+#define SET_TLBGSTATUS(b, v) SET_GLOBAL_REG(TLBGSTATUS, (b), (v))
+#define SET_TLBIVAH(b, v) SET_GLOBAL_REG(TLBIVAH, (b), (v))
+#define SET_GATS1UR(b, v) SET_GLOBAL_REG(GATS1UR, (b), (v))
+#define SET_GATS1UW(b, v) SET_GLOBAL_REG(GATS1UW, (b), (v))
+#define SET_GATS1PR(b, v) SET_GLOBAL_REG(GATS1PR, (b), (v))
+#define SET_GATS1PW(b, v) SET_GLOBAL_REG(GATS1PW, (b), (v))
+#define SET_GATS12UR(b, v) SET_GLOBAL_REG(GATS12UR, (b), (v))
+#define SET_GATS12UW(b, v) SET_GLOBAL_REG(GATS12UW, (b), (v))
+#define SET_GATS12PR(b, v) SET_GLOBAL_REG(GATS12PR, (b), (v))
+#define SET_GATS12PW(b, v) SET_GLOBAL_REG(GATS12PW, (b), (v))
+#define SET_GPAR(b, v) SET_GLOBAL_REG(GPAR, (b), (v))
+#define SET_GATSR(b, v) SET_GLOBAL_REG(GATSR, (b), (v))
+#define SET_NSCR0(b, v) SET_GLOBAL_REG(NSCR0, (b), (v))
+#define SET_NSCR2(b, v) SET_GLOBAL_REG(NSCR2, (b), (v))
+#define SET_NSACR(b, v) SET_GLOBAL_REG(NSACR, (b), (v))
+#define SET_NSGFAR(b, v) SET_GLOBAL_REG(NSGFAR, (b), (v))
+#define SET_NSGFSRRESTORE(b, v) SET_GLOBAL_REG(NSGFSRRESTORE, (b), (v))
+#define SET_PMCR(b, v) SET_GLOBAL_REG(PMCR, (b), (v))
+#define SET_SMR_N(b, N, v) SET_GLOBAL_REG_N(SMR, N, (b), (v))
+#define SET_S2CR_N(b, N, v) SET_GLOBAL_REG_N(S2CR, N, (b), (v))
+
+#define GET_CR0(b) GET_GLOBAL_REG(CR0, (b))
+#define GET_SCR1(b) GET_GLOBAL_REG(SCR1, (b))
+#define GET_CR2(b) GET_GLOBAL_REG(CR2, (b))
+#define GET_ACR(b) GET_GLOBAL_REG(ACR, (b))
+#define GET_IDR0(b, N) GET_GLOBAL_REG(IDR0, (b))
+#define GET_IDR1(b, N) GET_GLOBAL_REG(IDR1, (b))
+#define GET_IDR2(b, N) GET_GLOBAL_REG(IDR2, (b))
+#define GET_IDR7(b, N) GET_GLOBAL_REG(IDR7, (b))
+#define GET_GFAR(b) GET_GLOBAL_REG_Q(GFAR, (b))
+#define GET_GFSR(b) GET_GLOBAL_REG(GFSR, (b))
+#define GET_GFSRRESTORE(b) GET_GLOBAL_REG(GFSRRESTORE, (b))
+#define GET_GFSYNR0(b) GET_GLOBAL_REG(GFSYNR0, (b))
+#define GET_GFSYNR1(b) GET_GLOBAL_REG(GFSYNR1, (b))
+#define GET_GFSYNR2(b) GET_GLOBAL_REG(GFSYNR2, (b))
+#define GET_TLBIVMID(b) GET_GLOBAL_REG(TLBIVMID, (b))
+#define GET_TLBIALLNSNH(b) GET_GLOBAL_REG(TLBIALLNSNH, (b))
+#define GET_TLBIALLH(b) GET_GLOBAL_REG(TLBIALLH, (b))
+#define GET_TLBGSYNC(b) GET_GLOBAL_REG(TLBGSYNC, (b))
+#define GET_TLBGSTATUS(b) GET_GLOBAL_REG(TLBGSTATUS, (b))
+#define GET_TLBIVAH(b) GET_GLOBAL_REG(TLBIVAH, (b))
+#define GET_GATS1UR(b) GET_GLOBAL_REG(GATS1UR, (b))
+#define GET_GATS1UW(b) GET_GLOBAL_REG(GATS1UW, (b))
+#define GET_GATS1PR(b) GET_GLOBAL_REG(GATS1PR, (b))
+#define GET_GATS1PW(b) GET_GLOBAL_REG(GATS1PW, (b))
+#define GET_GATS12UR(b) GET_GLOBAL_REG(GATS12UR, (b))
+#define GET_GATS12UW(b) GET_GLOBAL_REG(GATS12UW, (b))
+#define GET_GATS12PR(b) GET_GLOBAL_REG(GATS12PR, (b))
+#define GET_GATS12PW(b) GET_GLOBAL_REG(GATS12PW, (b))
+#define GET_GPAR(b) GET_GLOBAL_REG(GPAR, (b))
+#define GET_GATSR(b) GET_GLOBAL_REG(GATSR, (b))
+#define GET_NSCR0(b) GET_GLOBAL_REG(NSCR0, (b))
+#define GET_NSCR2(b) GET_GLOBAL_REG(NSCR2, (b))
+#define GET_NSACR(b) GET_GLOBAL_REG(NSACR, (b))
+#define GET_PMCR(b) GET_GLOBAL_REG(PMCR, (b))
+#define GET_SMR_N(b, N) GET_GLOBAL_REG_N(SMR, N, (b))
+#define GET_S2CR_N(b, N) GET_GLOBAL_REG_N(S2CR, N, (b))
+
+/* Global register space 1 setters / getters */
+#define SET_CBAR_N(b, N, v) SET_GLOBAL_REG_N(CBAR, N, (b), (v))
+#define SET_CBFRSYNRA_N(b, N, v) SET_GLOBAL_REG_N(CBFRSYNRA, N, (b), (v))
+
+#define GET_CBAR_N(b, N) GET_GLOBAL_REG_N(CBAR, N, (b))
+#define GET_CBFRSYNRA_N(b, N) GET_GLOBAL_REG_N(CBFRSYNRA, N, (b))
+
+/* Implementation defined register setters/getters */
+#define SET_MICRO_MMU_CTRL_HALT_REQ(b, v) \
+ SET_GLOBAL_FIELD(b, MICRO_MMU_CTRL, HALT_REQ, v)
+#define GET_MICRO_MMU_CTRL_IDLE(b) \
+ GET_GLOBAL_FIELD(b, MICRO_MMU_CTRL, IDLE)
+#define SET_MICRO_MMU_CTRL_RESERVED(b, v) \
+ SET_GLOBAL_FIELD(b, MICRO_MMU_CTRL, RESERVED, v)
+
+#define MMU_CTRL_IDLE (MICRO_MMU_CTRL_IDLE_MASK << MICRO_MMU_CTRL_IDLE_SHIFT)
+
+#define SET_PREDICTIONDIS0(b, v) SET_GLOBAL_REG(PREDICTIONDIS0, (b), (v))
+#define SET_PREDICTIONDIS1(b, v) SET_GLOBAL_REG(PREDICTIONDIS1, (b), (v))
+#define SET_S1L1BFBLP0(b, v) SET_GLOBAL_REG(S1L1BFBLP0, (b), (v))
+
+/* SSD register setters/getters */
+#define SET_SSDR_N(b, N, v) SET_GLOBAL_REG_N(SSDR_N, N, (b), (v))
+
+#define GET_SSDR_N(b, N) GET_GLOBAL_REG_N(SSDR_N, N, (b))
+
+/* Context bank register setters/getters */
+#define SET_SCTLR(b, c, v) SET_CTX_REG(CB_SCTLR, (b), (c), (v))
+#define SET_ACTLR(b, c, v) SET_CTX_REG(CB_ACTLR, (b), (c), (v))
+#define SET_RESUME(b, c, v) SET_CTX_REG(CB_RESUME, (b), (c), (v))
+#define SET_TTBCR(b, c, v) SET_CTX_REG(CB_TTBCR, (b), (c), (v))
+#define SET_CONTEXTIDR(b, c, v) SET_CTX_REG(CB_CONTEXTIDR, (b), (c), (v))
+#define SET_PRRR(b, c, v) SET_CTX_REG(CB_PRRR, (b), (c), (v))
+#define SET_NMRR(b, c, v) SET_CTX_REG(CB_NMRR, (b), (c), (v))
+#define SET_PAR(b, c, v) SET_CTX_REG(CB_PAR, (b), (c), (v))
+#define SET_FSR(b, c, v) SET_CTX_REG(CB_FSR, (b), (c), (v))
+#define SET_FSRRESTORE(b, c, v) SET_CTX_REG(CB_FSRRESTORE, (b), (c), (v))
+#define SET_FAR(b, c, v) SET_CTX_REG(CB_FAR, (b), (c), (v))
+#define SET_FSYNR0(b, c, v) SET_CTX_REG(CB_FSYNR0, (b), (c), (v))
+#define SET_FSYNR1(b, c, v) SET_CTX_REG(CB_FSYNR1, (b), (c), (v))
+#define SET_TLBIVA(b, c, v) SET_CTX_REG(CB_TLBIVA, (b), (c), (v))
+#define SET_TLBIVAA(b, c, v) SET_CTX_REG(CB_TLBIVAA, (b), (c), (v))
+#define SET_TLBIASID(b, c, v) SET_CTX_REG(CB_TLBIASID, (b), (c), (v))
+#define SET_TLBIALL(b, c, v) SET_CTX_REG(CB_TLBIALL, (b), (c), (v))
+#define SET_TLBIVAL(b, c, v) SET_CTX_REG(CB_TLBIVAL, (b), (c), (v))
+#define SET_TLBIVAAL(b, c, v) SET_CTX_REG(CB_TLBIVAAL, (b), (c), (v))
+#define SET_TLBSYNC(b, c, v) SET_CTX_REG(CB_TLBSYNC, (b), (c), (v))
+#define SET_TLBSTATUS(b, c, v) SET_CTX_REG(CB_TLBSTATUS, (b), (c), (v))
+#define SET_ATS1PR(b, c, v) SET_CTX_REG(CB_ATS1PR, (b), (c), (v))
+#define SET_ATS1PW(b, c, v) SET_CTX_REG(CB_ATS1PW, (b), (c), (v))
+#define SET_ATS1UR(b, c, v) SET_CTX_REG(CB_ATS1UR, (b), (c), (v))
+#define SET_ATS1UW(b, c, v) SET_CTX_REG(CB_ATS1UW, (b), (c), (v))
+#define SET_ATSR(b, c, v) SET_CTX_REG(CB_ATSR, (b), (c), (v))
+
+#define GET_SCTLR(b, c) GET_CTX_REG(CB_SCTLR, (b), (c))
+#define GET_ACTLR(b, c) GET_CTX_REG(CB_ACTLR, (b), (c))
+#define GET_RESUME(b, c) GET_CTX_REG(CB_RESUME, (b), (c))
+#define GET_TTBR0(b, c) GET_CTX_REG(CB_TTBR0, (b), (c))
+#define GET_TTBR1(b, c) GET_CTX_REG(CB_TTBR1, (b), (c))
+#define GET_TTBCR(b, c) GET_CTX_REG(CB_TTBCR, (b), (c))
+#define GET_CONTEXTIDR(b, c) GET_CTX_REG(CB_CONTEXTIDR, (b), (c))
+#define GET_PRRR(b, c) GET_CTX_REG(CB_PRRR, (b), (c))
+#define GET_NMRR(b, c) GET_CTX_REG(CB_NMRR, (b), (c))
+#define GET_PAR(b, c) GET_CTX_REG_Q(CB_PAR, (b), (c))
+#define GET_FSR(b, c) GET_CTX_REG(CB_FSR, (b), (c))
+#define GET_FSRRESTORE(b, c) GET_CTX_REG(CB_FSRRESTORE, (b), (c))
+#define GET_FAR(b, c) GET_CTX_REG_Q(CB_FAR, (b), (c))
+#define GET_FSYNR0(b, c) GET_CTX_REG(CB_FSYNR0, (b), (c))
+#define GET_FSYNR1(b, c) GET_CTX_REG(CB_FSYNR1, (b), (c))
+#define GET_TLBIVA(b, c) GET_CTX_REG(CB_TLBIVA, (b), (c))
+#define GET_TLBIVAA(b, c) GET_CTX_REG(CB_TLBIVAA, (b), (c))
+#define GET_TLBIASID(b, c) GET_CTX_REG(CB_TLBIASID, (b), (c))
+#define GET_TLBIALL(b, c) GET_CTX_REG(CB_TLBIALL, (b), (c))
+#define GET_TLBIVAL(b, c) GET_CTX_REG(CB_TLBIVAL, (b), (c))
+#define GET_TLBIVAAL(b, c) GET_CTX_REG(CB_TLBIVAAL, (b), (c))
+#define GET_TLBSYNC(b, c) GET_CTX_REG(CB_TLBSYNC, (b), (c))
+#define GET_TLBSTATUS(b, c) GET_CTX_REG(CB_TLBSTATUS, (b), (c))
+#define GET_ATS1PR(b, c) GET_CTX_REG(CB_ATS1PR, (b), (c))
+#define GET_ATS1PW(b, c) GET_CTX_REG(CB_ATS1PW, (b), (c))
+#define GET_ATS1UR(b, c) GET_CTX_REG(CB_ATS1UR, (b), (c))
+#define GET_ATS1UW(b, c) GET_CTX_REG(CB_ATS1UW, (b), (c))
+#define GET_ATSR(b, c) GET_CTX_REG(CB_ATSR, (b), (c))
+
+/* Global Register field setters / getters */
+/* Configuration Register: CR0/NSCR0 */
+#define SET_CR0_NSCFG(b, v) SET_GLOBAL_FIELD(b, CR0, NSCFG, v)
+#define SET_CR0_WACFG(b, v) SET_GLOBAL_FIELD(b, CR0, WACFG, v)
+#define SET_CR0_RACFG(b, v) SET_GLOBAL_FIELD(b, CR0, RACFG, v)
+#define SET_CR0_SHCFG(b, v) SET_GLOBAL_FIELD(b, CR0, SHCFG, v)
+#define SET_CR0_SMCFCFG(b, v) SET_GLOBAL_FIELD(b, CR0, SMCFCFG, v)
+#define SET_NSCR0_SMCFCFG(b, v) SET_GLOBAL_FIELD(b, NSCR0, SMCFCFG, v)
+#define SET_CR0_MTCFG(b, v) SET_GLOBAL_FIELD(b, CR0, MTCFG, v)
+#define SET_CR0_BSU(b, v) SET_GLOBAL_FIELD(b, CR0, BSU, v)
+#define SET_CR0_FB(b, v) SET_GLOBAL_FIELD(b, CR0, FB, v)
+#define SET_CR0_PTM(b, v) SET_GLOBAL_FIELD(b, CR0, PTM, v)
+#define SET_CR0_VMIDPNE(b, v) SET_GLOBAL_FIELD(b, CR0, VMIDPNE, v)
+#define SET_CR0_USFCFG(b, v) SET_GLOBAL_FIELD(b, CR0, USFCFG, v)
+#define SET_NSCR0_USFCFG(b, v) SET_GLOBAL_FIELD(b, NSCR0, USFCFG, v)
+#define SET_CR0_GSE(b, v) SET_GLOBAL_FIELD(b, CR0, GSE, v)
+#define SET_CR0_STALLD(b, v) SET_GLOBAL_FIELD(b, CR0, STALLD, v)
+#define SET_NSCR0_STALLD(b, v) SET_GLOBAL_FIELD(b, NSCR0, STALLD, v)
+#define SET_CR0_TRANSIENTCFG(b, v) SET_GLOBAL_FIELD(b, CR0, TRANSIENTCFG, v)
+#define SET_CR0_GCFGFIE(b, v) SET_GLOBAL_FIELD(b, CR0, GCFGFIE, v)
+#define SET_NSCR0_GCFGFIE(b, v) SET_GLOBAL_FIELD(b, NSCR0, GCFGFIE, v)
+#define SET_CR0_GCFGFRE(b, v) SET_GLOBAL_FIELD(b, CR0, GCFGFRE, v)
+#define SET_NSCR0_GCFGFRE(b, v) SET_GLOBAL_FIELD(b, NSCR0, GCFGFRE, v)
+#define SET_CR0_GFIE(b, v) SET_GLOBAL_FIELD(b, CR0, GFIE, v)
+#define SET_NSCR0_GFIE(b, v) SET_GLOBAL_FIELD(b, NSCR0, GFIE, v)
+#define SET_CR0_GFRE(b, v) SET_GLOBAL_FIELD(b, CR0, GFRE, v)
+#define SET_NSCR0_GFRE(b, v) SET_GLOBAL_FIELD(b, NSCR0, GFRE, v)
+#define SET_CR0_CLIENTPD(b, v) SET_GLOBAL_FIELD(b, CR0, CLIENTPD, v)
+#define SET_NSCR0_CLIENTPD(b, v) SET_GLOBAL_FIELD(b, NSCR0, CLIENTPD, v)
+
+#define SET_ACR_SMTNMC_BPTLBEN(b, v)\
+ SET_GLOBAL_FIELD(b, ACR, SMTNMC_BPTLBEN, v)
+#define SET_ACR_MMUDIS_BPTLBEN(b, v)\
+ SET_GLOBAL_FIELD(b, ACR, MMUDIS_BPTLBEN, v)
+#define SET_ACR_S2CR_BPTLBEN(b, v)\
+ SET_GLOBAL_FIELD(b, ACR, S2CR_BPTLBEN, v)
+
+#define SET_NSACR_SMTNMC_BPTLBEN(b, v)\
+ SET_GLOBAL_FIELD(b, NSACR, SMTNMC_BPTLBEN, v)
+#define SET_NSACR_MMUDIS_BPTLBEN(b, v)\
+ SET_GLOBAL_FIELD(b, NSACR, MMUDIS_BPTLBEN, v)
+#define SET_NSACR_S2CR_BPTLBEN(b, v)\
+ SET_GLOBAL_FIELD(b, NSACR, S2CR_BPTLBEN, v)
+
+#define GET_CR0_NSCFG(b) GET_GLOBAL_FIELD(b, CR0, NSCFG)
+#define GET_CR0_WACFG(b) GET_GLOBAL_FIELD(b, CR0, WACFG)
+#define GET_CR0_RACFG(b) GET_GLOBAL_FIELD(b, CR0, RACFG)
+#define GET_CR0_SHCFG(b) GET_GLOBAL_FIELD(b, CR0, SHCFG)
+#define GET_CR0_SMCFCFG(b) GET_GLOBAL_FIELD(b, CR0, SMCFCFG)
+#define GET_CR0_MTCFG(b) GET_GLOBAL_FIELD(b, CR0, MTCFG)
+#define GET_CR0_BSU(b) GET_GLOBAL_FIELD(b, CR0, BSU)
+#define GET_CR0_FB(b) GET_GLOBAL_FIELD(b, CR0, FB)
+#define GET_CR0_PTM(b) GET_GLOBAL_FIELD(b, CR0, PTM)
+#define GET_CR0_VMIDPNE(b) GET_GLOBAL_FIELD(b, CR0, VMIDPNE)
+#define GET_CR0_USFCFG(b) GET_GLOBAL_FIELD(b, CR0, USFCFG)
+#define GET_CR0_GSE(b) GET_GLOBAL_FIELD(b, CR0, GSE)
+#define GET_CR0_STALLD(b) GET_GLOBAL_FIELD(b, CR0, STALLD)
+#define GET_CR0_TRANSIENTCFG(b) GET_GLOBAL_FIELD(b, CR0, TRANSIENTCFG)
+#define GET_CR0_GCFGFIE(b) GET_GLOBAL_FIELD(b, CR0, GCFGFIE)
+#define GET_CR0_GCFGFRE(b) GET_GLOBAL_FIELD(b, CR0, GCFGFRE)
+#define GET_CR0_GFIE(b) GET_GLOBAL_FIELD(b, CR0, GFIE)
+#define GET_CR0_GFRE(b) GET_GLOBAL_FIELD(b, CR0, GFRE)
+#define GET_CR0_CLIENTPD(b) GET_GLOBAL_FIELD(b, CR0, CLIENTPD)
+
+/* Configuration Register: CR2 */
+#define SET_CR2_BPVMID(b, v) SET_GLOBAL_FIELD(b, CR2, BPVMID, v)
+
+#define GET_CR2_BPVMID(b) GET_GLOBAL_FIELD(b, CR2, BPVMID)
+
+/* Global Address Translation, Stage 1, Privileged Read: GATS1PR */
+#define SET_GATS1PR_ADDR(b, v) SET_GLOBAL_FIELD(b, GATS1PR, ADDR, v)
+#define SET_GATS1PR_NDX(b, v) SET_GLOBAL_FIELD(b, GATS1PR, NDX, v)
+
+#define GET_GATS1PR_ADDR(b) GET_GLOBAL_FIELD(b, GATS1PR, ADDR)
+#define GET_GATS1PR_NDX(b) GET_GLOBAL_FIELD(b, GATS1PR, NDX)
+
+/* Global Address Translation, Stage 1, Privileged Write: GATS1PW */
+#define SET_GATS1PW_ADDR(b, v) SET_GLOBAL_FIELD(b, GATS1PW, ADDR, v)
+#define SET_GATS1PW_NDX(b, v) SET_GLOBAL_FIELD(b, GATS1PW, NDX, v)
+
+#define GET_GATS1PW_ADDR(b) GET_GLOBAL_FIELD(b, GATS1PW, ADDR)
+#define GET_GATS1PW_NDX(b) GET_GLOBAL_FIELD(b, GATS1PW, NDX)
+
+/* Global Address Translation, Stage 1, User Read: GATS1UR */
+#define SET_GATS1UR_ADDR(b, v) SET_GLOBAL_FIELD(b, GATS1UR, ADDR, v)
+#define SET_GATS1UR_NDX(b, v) SET_GLOBAL_FIELD(b, GATS1UR, NDX, v)
+
+#define GET_GATS1UR_ADDR(b) GET_GLOBAL_FIELD(b, GATS1UR, ADDR)
+#define GET_GATS1UR_NDX(b) GET_GLOBAL_FIELD(b, GATS1UR, NDX)
+
+/* Global Address Translation, Stage 1, User Write: GATS1UW */
+#define SET_GATS1UW_ADDR(b, v) SET_GLOBAL_FIELD(b, GATS1UW, ADDR, v)
+#define SET_GATS1UW_NDX(b, v) SET_GLOBAL_FIELD(b, GATS1UW, NDX, v)
+
+#define GET_GATS1UW_ADDR(b) GET_GLOBAL_FIELD(b, GATS1UW, ADDR)
+#define GET_GATS1UW_NDX(b) GET_GLOBAL_FIELD(b, GATS1UW, NDX)
+
+/* Global Address Translation, Stage 1 and 2, Privileged Read: GATS12PR */
+#define SET_GATS12PR_ADDR(b, v) SET_GLOBAL_FIELD(b, GATS12PR, ADDR, v)
+#define SET_GATS12PR_NDX(b, v) SET_GLOBAL_FIELD(b, GATS12PR, NDX, v)
+
+#define GET_GATS12PR_ADDR(b) GET_GLOBAL_FIELD(b, GATS12PR, ADDR)
+#define GET_GATS12PR_NDX(b) GET_GLOBAL_FIELD(b, GATS12PR, NDX)
+
+/* Global Address Translation, Stage 1 and 2, Privileged Write: GATS12PW */
+#define SET_GATS12PW_ADDR(b, v) SET_GLOBAL_FIELD(b, GATS12PW, ADDR, v)
+#define SET_GATS12PW_NDX(b, v) SET_GLOBAL_FIELD(b, GATS12PW, NDX, v)
+
+#define GET_GATS12PW_ADDR(b) GET_GLOBAL_FIELD(b, GATS12PW, ADDR)
+#define GET_GATS12PW_NDX(b) GET_GLOBAL_FIELD(b, GATS12PW, NDX)
+
+/* Global Address Translation, Stage 1 and 2, User Read: GATS12UR */
+#define SET_GATS12UR_ADDR(b, v) SET_GLOBAL_FIELD(b, GATS12UR, ADDR, v)
+#define SET_GATS12UR_NDX(b, v) SET_GLOBAL_FIELD(b, GATS12UR, NDX, v)
+
+#define GET_GATS12UR_ADDR(b) GET_GLOBAL_FIELD(b, GATS12UR, ADDR)
+#define GET_GATS12UR_NDX(b) GET_GLOBAL_FIELD(b, GATS12UR, NDX)
+
+/* Global Address Translation, Stage 1 and 2, User Write: GATS12UW */
+#define SET_GATS12UW_ADDR(b, v) SET_GLOBAL_FIELD(b, GATS12UW, ADDR, v)
+#define SET_GATS12UW_NDX(b, v) SET_GLOBAL_FIELD(b, GATS12UW, NDX, v)
+
+#define GET_GATS12UW_ADDR(b) GET_GLOBAL_FIELD(b, GATS12UW, ADDR)
+#define GET_GATS12UW_NDX(b) GET_GLOBAL_FIELD(b, GATS12UW, NDX)
+
+/* Global Address Translation Status Register: GATSR */
+#define SET_GATSR_ACTIVE(b, v) SET_GLOBAL_FIELD(b, GATSR, ACTIVE, v)
+
+#define GET_GATSR_ACTIVE(b) GET_GLOBAL_FIELD(b, GATSR, ACTIVE)
+
+/* Global Fault Address Register: GFAR */
+#define SET_GFAR_FADDR(b, v) SET_GLOBAL_FIELD(b, GFAR, FADDR, v)
+
+#define GET_GFAR_FADDR(b) GET_GLOBAL_FIELD(b, GFAR, FADDR)
+
+/* Global Fault Status Register: GFSR */
+#define SET_GFSR_ICF(b, v) SET_GLOBAL_FIELD(b, GFSR, ICF, v)
+#define SET_GFSR_USF(b, v) SET_GLOBAL_FIELD(b, GFSR, USF, v)
+#define SET_GFSR_SMCF(b, v) SET_GLOBAL_FIELD(b, GFSR, SMCF, v)
+#define SET_GFSR_UCBF(b, v) SET_GLOBAL_FIELD(b, GFSR, UCBF, v)
+#define SET_GFSR_UCIF(b, v) SET_GLOBAL_FIELD(b, GFSR, UCIF, v)
+#define SET_GFSR_CAF(b, v) SET_GLOBAL_FIELD(b, GFSR, CAF, v)
+#define SET_GFSR_EF(b, v) SET_GLOBAL_FIELD(b, GFSR, EF, v)
+#define SET_GFSR_PF(b, v) SET_GLOBAL_FIELD(b, GFSR, PF, v)
+#define SET_GFSR_MULTI(b, v) SET_GLOBAL_FIELD(b, GFSR, MULTI, v)
+
+#define GET_GFSR_ICF(b) GET_GLOBAL_FIELD(b, GFSR, ICF)
+#define GET_GFSR_USF(b) GET_GLOBAL_FIELD(b, GFSR, USF)
+#define GET_GFSR_SMCF(b) GET_GLOBAL_FIELD(b, GFSR, SMCF)
+#define GET_GFSR_UCBF(b) GET_GLOBAL_FIELD(b, GFSR, UCBF)
+#define GET_GFSR_UCIF(b) GET_GLOBAL_FIELD(b, GFSR, UCIF)
+#define GET_GFSR_CAF(b) GET_GLOBAL_FIELD(b, GFSR, CAF)
+#define GET_GFSR_EF(b) GET_GLOBAL_FIELD(b, GFSR, EF)
+#define GET_GFSR_PF(b) GET_GLOBAL_FIELD(b, GFSR, PF)
+#define GET_GFSR_MULTI(b) GET_GLOBAL_FIELD(b, GFSR, MULTI)
+
+/* Global Fault Syndrome Register 0: GFSYNR0 */
+#define SET_GFSYNR0_NESTED(b, v) SET_GLOBAL_FIELD(b, GFSYNR0, NESTED, v)
+#define SET_GFSYNR0_WNR(b, v) SET_GLOBAL_FIELD(b, GFSYNR0, WNR, v)
+#define SET_GFSYNR0_PNU(b, v) SET_GLOBAL_FIELD(b, GFSYNR0, PNU, v)
+#define SET_GFSYNR0_IND(b, v) SET_GLOBAL_FIELD(b, GFSYNR0, IND, v)
+#define SET_GFSYNR0_NSSTATE(b, v) SET_GLOBAL_FIELD(b, GFSYNR0, NSSTATE, v)
+#define SET_GFSYNR0_NSATTR(b, v) SET_GLOBAL_FIELD(b, GFSYNR0, NSATTR, v)
+
+#define GET_GFSYNR0_NESTED(b) GET_GLOBAL_FIELD(b, GFSYNR0, NESTED)
+#define GET_GFSYNR0_WNR(b) GET_GLOBAL_FIELD(b, GFSYNR0, WNR)
+#define GET_GFSYNR0_PNU(b) GET_GLOBAL_FIELD(b, GFSYNR0, PNU)
+#define GET_GFSYNR0_IND(b) GET_GLOBAL_FIELD(b, GFSYNR0, IND)
+#define GET_GFSYNR0_NSSTATE(b) GET_GLOBAL_FIELD(b, GFSYNR0, NSSTATE)
+#define GET_GFSYNR0_NSATTR(b) GET_GLOBAL_FIELD(b, GFSYNR0, NSATTR)
+
+/* Global Fault Syndrome Register 1: GFSYNR1 */
+#define SET_GFSYNR1_SID(b, v) SET_GLOBAL_FIELD(b, GFSYNR1, SID, v)
+
+#define GET_GFSYNR1_SID(b) GET_GLOBAL_FIELD(b, GFSYNR1, SID)
+
+/* Global Physical Address Register: GPAR */
+#define SET_GPAR_F(b, v) SET_GLOBAL_FIELD(b, GPAR, F, v)
+#define SET_GPAR_SS(b, v) SET_GLOBAL_FIELD(b, GPAR, SS, v)
+#define SET_GPAR_OUTER(b, v) SET_GLOBAL_FIELD(b, GPAR, OUTER, v)
+#define SET_GPAR_INNER(b, v) SET_GLOBAL_FIELD(b, GPAR, INNER, v)
+#define SET_GPAR_SH(b, v) SET_GLOBAL_FIELD(b, GPAR, SH, v)
+#define SET_GPAR_NS(b, v) SET_GLOBAL_FIELD(b, GPAR, NS, v)
+#define SET_GPAR_NOS(b, v) SET_GLOBAL_FIELD(b, GPAR, NOS, v)
+#define SET_GPAR_PA(b, v) SET_GLOBAL_FIELD(b, GPAR, PA, v)
+#define SET_GPAR_TF(b, v) SET_GLOBAL_FIELD(b, GPAR, TF, v)
+#define SET_GPAR_AFF(b, v) SET_GLOBAL_FIELD(b, GPAR, AFF, v)
+#define SET_GPAR_PF(b, v) SET_GLOBAL_FIELD(b, GPAR, PF, v)
+#define SET_GPAR_EF(b, v) SET_GLOBAL_FIELD(b, GPAR, EF, v)
+#define SET_GPAR_TLCMCF(b, v) SET_GLOBAL_FIELD(b, GPAR, TLCMCF, v)
+#define SET_GPAR_TLBLKF(b, v) SET_GLOBAL_FIELD(b, GPAR, TLBLKF, v)
+#define SET_GPAR_UCBF(b, v) SET_GLOBAL_FIELD(b, GPAR, UCBF, v)
+
+#define GET_GPAR_F(b) GET_GLOBAL_FIELD(b, GPAR, F)
+#define GET_GPAR_SS(b) GET_GLOBAL_FIELD(b, GPAR, SS)
+#define GET_GPAR_OUTER(b) GET_GLOBAL_FIELD(b, GPAR, OUTER)
+#define GET_GPAR_INNER(b) GET_GLOBAL_FIELD(b, GPAR, INNER)
+#define GET_GPAR_SH(b) GET_GLOBAL_FIELD(b, GPAR, SH)
+#define GET_GPAR_NS(b) GET_GLOBAL_FIELD(b, GPAR, NS)
+#define GET_GPAR_NOS(b) GET_GLOBAL_FIELD(b, GPAR, NOS)
+#define GET_GPAR_PA(b) GET_GLOBAL_FIELD(b, GPAR, PA)
+#define GET_GPAR_TF(b) GET_GLOBAL_FIELD(b, GPAR, TF)
+#define GET_GPAR_AFF(b) GET_GLOBAL_FIELD(b, GPAR, AFF)
+#define GET_GPAR_PF(b) GET_GLOBAL_FIELD(b, GPAR, PF)
+#define GET_GPAR_EF(b) GET_GLOBAL_FIELD(b, GPAR, EF)
+#define GET_GPAR_TLCMCF(b) GET_GLOBAL_FIELD(b, GPAR, TLCMCF)
+#define GET_GPAR_TLBLKF(b) GET_GLOBAL_FIELD(b, GPAR, TLBLKF)
+#define GET_GPAR_UCBF(b) GET_GLOBAL_FIELD(b, GPAR, UCBF)
+
+/* Identification Register: IDR0 */
+#define SET_IDR0_NUMSMRG(b, v) SET_GLOBAL_FIELD(b, IDR0, NUMSMRG, v)
+#define SET_IDR0_NUMSIDB(b, v) SET_GLOBAL_FIELD(b, IDR0, NUMSIDB, v)
+#define SET_IDR0_BTM(b, v) SET_GLOBAL_FIELD(b, IDR0, BTM, v)
+#define SET_IDR0_CTTW(b, v) SET_GLOBAL_FIELD(b, IDR0, CTTW, v)
+#define SET_IDR0_NUMIRPT(b, v) SET_GLOBAL_FIELD(b, IDR0, NUMIRPT, v)
+#define SET_IDR0_PTFS(b, v) SET_GLOBAL_FIELD(b, IDR0, PTFS, v)
+#define SET_IDR0_SMS(b, v) SET_GLOBAL_FIELD(b, IDR0, SMS, v)
+#define SET_IDR0_NTS(b, v) SET_GLOBAL_FIELD(b, IDR0, NTS, v)
+#define SET_IDR0_S2TS(b, v) SET_GLOBAL_FIELD(b, IDR0, S2TS, v)
+#define SET_IDR0_S1TS(b, v) SET_GLOBAL_FIELD(b, IDR0, S1TS, v)
+#define SET_IDR0_SES(b, v) SET_GLOBAL_FIELD(b, IDR0, SES, v)
+
+#define GET_IDR0_NUMSMRG(b) GET_GLOBAL_FIELD(b, IDR0, NUMSMRG)
+#define GET_IDR0_NUMSIDB(b) GET_GLOBAL_FIELD(b, IDR0, NUMSIDB)
+#define GET_IDR0_BTM(b) GET_GLOBAL_FIELD(b, IDR0, BTM)
+#define GET_IDR0_CTTW(b) GET_GLOBAL_FIELD(b, IDR0, CTTW)
+#define GET_IDR0_NUMIRPT(b) GET_GLOBAL_FIELD(b, IDR0, NUMIRPT)
+#define GET_IDR0_PTFS(b) GET_GLOBAL_FIELD(b, IDR0, PTFS)
+#define GET_IDR0_SMS(b) GET_GLOBAL_FIELD(b, IDR0, SMS)
+#define GET_IDR0_NTS(b) GET_GLOBAL_FIELD(b, IDR0, NTS)
+#define GET_IDR0_S2TS(b) GET_GLOBAL_FIELD(b, IDR0, S2TS)
+#define GET_IDR0_S1TS(b) GET_GLOBAL_FIELD(b, IDR0, S1TS)
+#define GET_IDR0_SES(b) GET_GLOBAL_FIELD(b, IDR0, SES)
+
+/* Identification Register: IDR1 */
+#define SET_IDR1_NUMCB(b, v) SET_GLOBAL_FIELD(b, IDR1, NUMCB, v)
+#define SET_IDR1_NUMSSDNDXB(b, v) SET_GLOBAL_FIELD(b, IDR1, NUMSSDNDXB, v)
+#define SET_IDR1_SSDTP(b, v) SET_GLOBAL_FIELD(b, IDR1, SSDTP, v)
+#define SET_IDR1_SMCD(b, v) SET_GLOBAL_FIELD(b, IDR1, SMCD, v)
+#define SET_IDR1_NUMS2CB(b, v) SET_GLOBAL_FIELD(b, IDR1, NUMS2CB, v)
+#define SET_IDR1_NUMPAGENDXB(b, v) SET_GLOBAL_FIELD(b, IDR1, NUMPAGENDXB, v)
+#define SET_IDR1_PAGESIZE(b, v) SET_GLOBAL_FIELD(b, IDR1, PAGESIZE, v)
+
+#define GET_IDR1_NUMCB(b) GET_GLOBAL_FIELD(b, IDR1, NUMCB)
+#define GET_IDR1_NUMSSDNDXB(b) GET_GLOBAL_FIELD(b, IDR1, NUMSSDNDXB)
+#define GET_IDR1_SSDTP(b) GET_GLOBAL_FIELD(b, IDR1, SSDTP)
+#define GET_IDR1_SMCD(b) GET_GLOBAL_FIELD(b, IDR1, SMCD)
+#define GET_IDR1_NUMS2CB(b) GET_GLOBAL_FIELD(b, IDR1, NUMS2CB)
+#define GET_IDR1_NUMPAGENDXB(b) GET_GLOBAL_FIELD(b, IDR1, NUMPAGENDXB)
+#define GET_IDR1_PAGESIZE(b) GET_GLOBAL_FIELD(b, IDR1, PAGESIZE)
+
+/* Identification Register: IDR2 */
+#define SET_IDR2_IAS(b, v) SET_GLOBAL_FIELD(b, IDR2, IAS, v)
+#define SET_IDR2_OAS(b, v) SET_GLOBAL_FIELD(b, IDR2, OAS, v)
+
+#define GET_IDR2_IAS(b) GET_GLOBAL_FIELD(b, IDR2, IAS)
+#define GET_IDR2_OAS(b) GET_GLOBAL_FIELD(b, IDR2, OAS)
+
+/* Identification Register: IDR7 */
+#define SET_IDR7_MINOR(b, v) SET_GLOBAL_FIELD(b, IDR7, MINOR, v)
+#define SET_IDR7_MAJOR(b, v) SET_GLOBAL_FIELD(b, IDR7, MAJOR, v)
+
+#define GET_IDR7_MINOR(b) GET_GLOBAL_FIELD(b, IDR7, MINOR)
+#define GET_IDR7_MAJOR(b) GET_GLOBAL_FIELD(b, IDR7, MAJOR)
+
+/* Stream to Context Register: S2CR_N */
+#define SET_S2CR_CBNDX(b, n, v) SET_GLOBAL_FIELD_N(b, n, S2CR, CBNDX, v)
+#define SET_S2CR_SHCFG(b, n, v) SET_GLOBAL_FIELD_N(b, n, S2CR, SHCFG, v)
+#define SET_S2CR_MTCFG(b, n, v) SET_GLOBAL_FIELD_N(b, n, S2CR, MTCFG, v)
+#define SET_S2CR_MEMATTR(b, n, v) SET_GLOBAL_FIELD_N(b, n, S2CR, MEMATTR, v)
+#define SET_S2CR_TYPE(b, n, v) SET_GLOBAL_FIELD_N(b, n, S2CR, TYPE, v)
+#define SET_S2CR_NSCFG(b, n, v) SET_GLOBAL_FIELD_N(b, n, S2CR, NSCFG, v)
+#define SET_S2CR_RACFG(b, n, v) SET_GLOBAL_FIELD_N(b, n, S2CR, RACFG, v)
+#define SET_S2CR_WACFG(b, n, v) SET_GLOBAL_FIELD_N(b, n, S2CR, WACFG, v)
+#define SET_S2CR_PRIVCFG(b, n, v) SET_GLOBAL_FIELD_N(b, n, S2CR, PRIVCFG, v)
+#define SET_S2CR_INSTCFG(b, n, v) SET_GLOBAL_FIELD_N(b, n, S2CR, INSTCFG, v)
+#define SET_S2CR_TRANSIENTCFG(b, n, v) \
+ SET_GLOBAL_FIELD_N(b, n, S2CR, TRANSIENTCFG, v)
+#define SET_S2CR_VMID(b, n, v) SET_GLOBAL_FIELD_N(b, n, S2CR, VMID, v)
+#define SET_S2CR_BSU(b, n, v) SET_GLOBAL_FIELD_N(b, n, S2CR, BSU, v)
+#define SET_S2CR_FB(b, n, v) SET_GLOBAL_FIELD_N(b, n, S2CR, FB, v)
+
+#define GET_S2CR_CBNDX(b, n) GET_GLOBAL_FIELD_N(b, n, S2CR, CBNDX)
+#define GET_S2CR_SHCFG(b, n) GET_GLOBAL_FIELD_N(b, n, S2CR, SHCFG)
+#define GET_S2CR_MTCFG(b, n) GET_GLOBAL_FIELD_N(b, n, S2CR, MTCFG)
+#define GET_S2CR_MEMATTR(b, n) GET_GLOBAL_FIELD_N(b, n, S2CR, MEMATTR)
+#define GET_S2CR_TYPE(b, n) GET_GLOBAL_FIELD_N(b, n, S2CR, TYPE)
+#define GET_S2CR_NSCFG(b, n) GET_GLOBAL_FIELD_N(b, n, S2CR, NSCFG)
+#define GET_S2CR_RACFG(b, n) GET_GLOBAL_FIELD_N(b, n, S2CR, RACFG)
+#define GET_S2CR_WACFG(b, n) GET_GLOBAL_FIELD_N(b, n, S2CR, WACFG)
+#define GET_S2CR_PRIVCFG(b, n) GET_GLOBAL_FIELD_N(b, n, S2CR, PRIVCFG)
+#define GET_S2CR_INSTCFG(b, n) GET_GLOBAL_FIELD_N(b, n, S2CR, INSTCFG)
+#define GET_S2CR_TRANSIENTCFG(b, n) \
+ GET_GLOBAL_FIELD_N(b, n, S2CR, TRANSIENTCFG)
+#define GET_S2CR_VMID(b, n) GET_GLOBAL_FIELD_N(b, n, S2CR, VMID)
+#define GET_S2CR_BSU(b, n) GET_GLOBAL_FIELD_N(b, n, S2CR, BSU)
+#define GET_S2CR_FB(b, n) GET_GLOBAL_FIELD_N(b, n, S2CR, FB)
+
+/* Stream Match Register: SMR_N */
+#define SET_SMR_ID(b, n, v) SET_GLOBAL_FIELD_N(b, n, SMR, ID, v)
+#define SET_SMR_MASK(b, n, v) SET_GLOBAL_FIELD_N(b, n, SMR, MASK, v)
+#define SET_SMR_VALID(b, n, v) SET_GLOBAL_FIELD_N(b, n, SMR, VALID, v)
+
+#define GET_SMR_ID(b, n) GET_GLOBAL_FIELD_N(b, n, SMR, ID)
+#define GET_SMR_MASK(b, n) GET_GLOBAL_FIELD_N(b, n, SMR, MASK)
+#define GET_SMR_VALID(b, n) GET_GLOBAL_FIELD_N(b, n, SMR, VALID)
+
+/* Global TLB Status: TLBGSTATUS */
+#define SET_TLBGSTATUS_GSACTIVE(b, v) \
+ SET_GLOBAL_FIELD(b, TLBGSTATUS, GSACTIVE, v)
+
+#define GET_TLBGSTATUS_GSACTIVE(b) \
+ GET_GLOBAL_FIELD(b, TLBGSTATUS, GSACTIVE)
+
+/* Invalidate Hyp TLB by VA: TLBIVAH */
+#define SET_TLBIVAH_ADDR(b, v) SET_GLOBAL_FIELD(b, TLBIVAH, ADDR, v)
+
+#define GET_TLBIVAH_ADDR(b) GET_GLOBAL_FIELD(b, TLBIVAH, ADDR)
+
+/* Invalidate TLB by VMID: TLBIVMID */
+#define SET_TLBIVMID_VMID(b, v) SET_GLOBAL_FIELD(b, TLBIVMID, VMID, v)
+
+#define GET_TLBIVMID_VMID(b) GET_GLOBAL_FIELD(b, TLBIVMID, VMID)
+
+/* Global Register Space 1 Field setters/getters*/
+/* Context Bank Attribute Register: CBAR_N */
+#define SET_CBAR_VMID(b, n, v) SET_GLOBAL_FIELD_N(b, n, CBAR, VMID, v)
+#define SET_CBAR_CBNDX(b, n, v) SET_GLOBAL_FIELD_N(b, n, CBAR, CBNDX, v)
+#define SET_CBAR_BPSHCFG(b, n, v) SET_GLOBAL_FIELD_N(b, n, CBAR, BPSHCFG, v)
+#define SET_CBAR_HYPC(b, n, v) SET_GLOBAL_FIELD_N(b, n, CBAR, HYPC, v)
+#define SET_CBAR_FB(b, n, v) SET_GLOBAL_FIELD_N(b, n, CBAR, FB, v)
+#define SET_CBAR_MEMATTR(b, n, v) SET_GLOBAL_FIELD_N(b, n, CBAR, MEMATTR, v)
+#define SET_CBAR_TYPE(b, n, v) SET_GLOBAL_FIELD_N(b, n, CBAR, TYPE, v)
+#define SET_CBAR_BSU(b, n, v) SET_GLOBAL_FIELD_N(b, n, CBAR, BSU, v)
+#define SET_CBAR_RACFG(b, n, v) SET_GLOBAL_FIELD_N(b, n, CBAR, RACFG, v)
+#define SET_CBAR_WACFG(b, n, v) SET_GLOBAL_FIELD_N(b, n, CBAR, WACFG, v)
+#define SET_CBAR_IRPTNDX(b, n, v) SET_GLOBAL_FIELD_N(b, n, CBAR, IRPTNDX, v)
+
+#define GET_CBAR_VMID(b, n) GET_GLOBAL_FIELD_N(b, n, CBAR, VMID)
+#define GET_CBAR_CBNDX(b, n) GET_GLOBAL_FIELD_N(b, n, CBAR, CBNDX)
+#define GET_CBAR_BPSHCFG(b, n) GET_GLOBAL_FIELD_N(b, n, CBAR, BPSHCFG)
+#define GET_CBAR_HYPC(b, n) GET_GLOBAL_FIELD_N(b, n, CBAR, HYPC)
+#define GET_CBAR_FB(b, n) GET_GLOBAL_FIELD_N(b, n, CBAR, FB)
+#define GET_CBAR_MEMATTR(b, n) GET_GLOBAL_FIELD_N(b, n, CBAR, MEMATTR)
+#define GET_CBAR_TYPE(b, n) GET_GLOBAL_FIELD_N(b, n, CBAR, TYPE)
+#define GET_CBAR_BSU(b, n) GET_GLOBAL_FIELD_N(b, n, CBAR, BSU)
+#define GET_CBAR_RACFG(b, n) GET_GLOBAL_FIELD_N(b, n, CBAR, RACFG)
+#define GET_CBAR_WACFG(b, n) GET_GLOBAL_FIELD_N(b, n, CBAR, WACFG)
+#define GET_CBAR_IRPTNDX(b, n) GET_GLOBAL_FIELD_N(b, n, CBAR, IRPTNDX)
+
+/* Context Bank Fault Restricted Syndrome Register A: CBFRSYNRA_N */
+#define SET_CBFRSYNRA_SID(b, n, v) SET_GLOBAL_FIELD_N(b, n, CBFRSYNRA, SID, v)
+
+#define GET_CBFRSYNRA_SID(b, n) GET_GLOBAL_FIELD_N(b, n, CBFRSYNRA, SID)
+
+/* Stage 1 Context Bank Format Fields */
+#define SET_CB_ACTLR_REQPRIORITY(b, c, v) \
+	SET_CONTEXT_FIELD(b, c, CB_ACTLR, REQPRIORITY, v)
+#define SET_CB_ACTLR_REQPRIORITYCFG(b, c, v) \
+	SET_CONTEXT_FIELD(b, c, CB_ACTLR, REQPRIORITYCFG, v)
+#define SET_CB_ACTLR_PRIVCFG(b, c, v) \
+	SET_CONTEXT_FIELD(b, c, CB_ACTLR, PRIVCFG, v)
+#define SET_CB_ACTLR_BPRCOSH(b, c, v) \
+	SET_CONTEXT_FIELD(b, c, CB_ACTLR, BPRCOSH, v)
+#define SET_CB_ACTLR_BPRCISH(b, c, v) \
+	SET_CONTEXT_FIELD(b, c, CB_ACTLR, BPRCISH, v)
+#define SET_CB_ACTLR_BPRCNSH(b, c, v) \
+	SET_CONTEXT_FIELD(b, c, CB_ACTLR, BPRCNSH, v)
+
+#define GET_CB_ACTLR_REQPRIORITY(b, c) \
+	GET_CONTEXT_FIELD(b, c, CB_ACTLR, REQPRIORITY)
+#define GET_CB_ACTLR_REQPRIORITYCFG(b, c) \
+	GET_CONTEXT_FIELD(b, c, CB_ACTLR, REQPRIORITYCFG)
+#define GET_CB_ACTLR_PRIVCFG(b, c) GET_CONTEXT_FIELD(b, c, CB_ACTLR, PRIVCFG)
+#define GET_CB_ACTLR_BPRCOSH(b, c) GET_CONTEXT_FIELD(b, c, CB_ACTLR, BPRCOSH)
+#define GET_CB_ACTLR_BPRCISH(b, c) GET_CONTEXT_FIELD(b, c, CB_ACTLR, BPRCISH)
+#define GET_CB_ACTLR_BPRCNSH(b, c) GET_CONTEXT_FIELD(b, c, CB_ACTLR, BPRCNSH)
+
+/*
+ * Per-context address-translation operation registers (CB_ATS1{P,U}{R,W}),
+ * the translation status register (CB_ATSR), context ID (CB_CONTEXTIDR),
+ * fault address (CB_FAR) and fault status (CB_FSR) accessors.
+ * b = register base, c = context bank, v = value to write.
+ */
+/* Address Translation, Stage 1, Privileged Read: CB_ATS1PR */
+#define SET_CB_ATS1PR_ADDR(b, c, v) SET_CONTEXT_FIELD(b, c, CB_ATS1PR, ADDR, v)
+
+#define GET_CB_ATS1PR_ADDR(b, c) GET_CONTEXT_FIELD(b, c, CB_ATS1PR, ADDR)
+
+/* Address Translation, Stage 1, Privileged Write: CB_ATS1PW */
+#define SET_CB_ATS1PW_ADDR(b, c, v) SET_CONTEXT_FIELD(b, c, CB_ATS1PW, ADDR, v)
+
+#define GET_CB_ATS1PW_ADDR(b, c) GET_CONTEXT_FIELD(b, c, CB_ATS1PW, ADDR)
+
+/* Address Translation, Stage 1, User Read: CB_ATS1UR */
+#define SET_CB_ATS1UR_ADDR(b, c, v) SET_CONTEXT_FIELD(b, c, CB_ATS1UR, ADDR, v)
+
+#define GET_CB_ATS1UR_ADDR(b, c) GET_CONTEXT_FIELD(b, c, CB_ATS1UR, ADDR)
+
+/* Address Translation, Stage 1, User Write: CB_ATS1UW */
+#define SET_CB_ATS1UW_ADDR(b, c, v) SET_CONTEXT_FIELD(b, c, CB_ATS1UW, ADDR, v)
+
+#define GET_CB_ATS1UW_ADDR(b, c) GET_CONTEXT_FIELD(b, c, CB_ATS1UW, ADDR)
+
+/* Address Translation Status Register: CB_ATSR */
+#define SET_CB_ATSR_ACTIVE(b, c, v) SET_CONTEXT_FIELD(b, c, CB_ATSR, ACTIVE, v)
+
+#define GET_CB_ATSR_ACTIVE(b, c) GET_CONTEXT_FIELD(b, c, CB_ATSR, ACTIVE)
+
+/* Context ID Register: CB_CONTEXTIDR */
+#define SET_CB_CONTEXTIDR_ASID(b, c, v) \
+	SET_CONTEXT_FIELD(b, c, CB_CONTEXTIDR, ASID, v)
+#define SET_CB_CONTEXTIDR_PROCID(b, c, v) \
+	SET_CONTEXT_FIELD(b, c, CB_CONTEXTIDR, PROCID, v)
+
+#define GET_CB_CONTEXTIDR_ASID(b, c) \
+	GET_CONTEXT_FIELD(b, c, CB_CONTEXTIDR, ASID)
+#define GET_CB_CONTEXTIDR_PROCID(b, c) \
+	GET_CONTEXT_FIELD(b, c, CB_CONTEXTIDR, PROCID)
+
+/* Fault Address Register: CB_FAR */
+#define SET_CB_FAR_FADDR(b, c, v) SET_CONTEXT_FIELD(b, c, CB_FAR, FADDR, v)
+
+#define GET_CB_FAR_FADDR(b, c) GET_CONTEXT_FIELD(b, c, CB_FAR, FADDR)
+
+/* Fault Status Register: CB_FSR (one accessor per fault-type bit) */
+#define SET_CB_FSR_TF(b, c, v) SET_CONTEXT_FIELD(b, c, CB_FSR, TF, v)
+#define SET_CB_FSR_AFF(b, c, v) SET_CONTEXT_FIELD(b, c, CB_FSR, AFF, v)
+#define SET_CB_FSR_PF(b, c, v) SET_CONTEXT_FIELD(b, c, CB_FSR, PF, v)
+#define SET_CB_FSR_EF(b, c, v) SET_CONTEXT_FIELD(b, c, CB_FSR, EF, v)
+#define SET_CB_FSR_TLBMCF(b, c, v) SET_CONTEXT_FIELD(b, c, CB_FSR, TLBMCF, v)
+#define SET_CB_FSR_TLBLKF(b, c, v) SET_CONTEXT_FIELD(b, c, CB_FSR, TLBLKF, v)
+#define SET_CB_FSR_SS(b, c, v) SET_CONTEXT_FIELD(b, c, CB_FSR, SS, v)
+#define SET_CB_FSR_MULTI(b, c, v) SET_CONTEXT_FIELD(b, c, CB_FSR, MULTI, v)
+
+#define GET_CB_FSR_TF(b, c) GET_CONTEXT_FIELD(b, c, CB_FSR, TF)
+#define GET_CB_FSR_AFF(b, c) GET_CONTEXT_FIELD(b, c, CB_FSR, AFF)
+#define GET_CB_FSR_PF(b, c) GET_CONTEXT_FIELD(b, c, CB_FSR, PF)
+#define GET_CB_FSR_EF(b, c) GET_CONTEXT_FIELD(b, c, CB_FSR, EF)
+#define GET_CB_FSR_TLBMCF(b, c) GET_CONTEXT_FIELD(b, c, CB_FSR, TLBMCF)
+#define GET_CB_FSR_TLBLKF(b, c) GET_CONTEXT_FIELD(b, c, CB_FSR, TLBLKF)
+#define GET_CB_FSR_SS(b, c) GET_CONTEXT_FIELD(b, c, CB_FSR, SS)
+#define GET_CB_FSR_MULTI(b, c) GET_CONTEXT_FIELD(b, c, CB_FSR, MULTI)
+
+/* Fault Syndrome Register 0: CB_FSYNR0 -- describes the faulting access
+ * (page-table level, write/read, privilege, NS state, etc.).
+ * b = register base, c = context bank, v = value to write. */
+#define SET_CB_FSYNR0_PLVL(b, c, v) SET_CONTEXT_FIELD(b, c, CB_FSYNR0, PLVL, v)
+#define SET_CB_FSYNR0_S1PTWF(b, c, v) \
+	SET_CONTEXT_FIELD(b, c, CB_FSYNR0, S1PTWF, v)
+#define SET_CB_FSYNR0_WNR(b, c, v) SET_CONTEXT_FIELD(b, c, CB_FSYNR0, WNR, v)
+#define SET_CB_FSYNR0_PNU(b, c, v) SET_CONTEXT_FIELD(b, c, CB_FSYNR0, PNU, v)
+#define SET_CB_FSYNR0_IND(b, c, v) SET_CONTEXT_FIELD(b, c, CB_FSYNR0, IND, v)
+#define SET_CB_FSYNR0_NSSTATE(b, c, v) \
+	SET_CONTEXT_FIELD(b, c, CB_FSYNR0, NSSTATE, v)
+#define SET_CB_FSYNR0_NSATTR(b, c, v) \
+	SET_CONTEXT_FIELD(b, c, CB_FSYNR0, NSATTR, v)
+#define SET_CB_FSYNR0_ATOF(b, c, v) SET_CONTEXT_FIELD(b, c, CB_FSYNR0, ATOF, v)
+#define SET_CB_FSYNR0_PTWF(b, c, v) SET_CONTEXT_FIELD(b, c, CB_FSYNR0, PTWF, v)
+#define SET_CB_FSYNR0_AFR(b, c, v) SET_CONTEXT_FIELD(b, c, CB_FSYNR0, AFR, v)
+#define SET_CB_FSYNR0_S1CBNDX(b, c, v) \
+	SET_CONTEXT_FIELD(b, c, CB_FSYNR0, S1CBNDX, v)
+
+#define GET_CB_FSYNR0_PLVL(b, c) GET_CONTEXT_FIELD(b, c, CB_FSYNR0, PLVL)
+#define GET_CB_FSYNR0_S1PTWF(b, c) \
+	GET_CONTEXT_FIELD(b, c, CB_FSYNR0, S1PTWF)
+#define GET_CB_FSYNR0_WNR(b, c) GET_CONTEXT_FIELD(b, c, CB_FSYNR0, WNR)
+#define GET_CB_FSYNR0_PNU(b, c) GET_CONTEXT_FIELD(b, c, CB_FSYNR0, PNU)
+#define GET_CB_FSYNR0_IND(b, c) GET_CONTEXT_FIELD(b, c, CB_FSYNR0, IND)
+#define GET_CB_FSYNR0_NSSTATE(b, c) \
+	GET_CONTEXT_FIELD(b, c, CB_FSYNR0, NSSTATE)
+#define GET_CB_FSYNR0_NSATTR(b, c) \
+	GET_CONTEXT_FIELD(b, c, CB_FSYNR0, NSATTR)
+#define GET_CB_FSYNR0_ATOF(b, c) GET_CONTEXT_FIELD(b, c, CB_FSYNR0, ATOF)
+#define GET_CB_FSYNR0_PTWF(b, c) GET_CONTEXT_FIELD(b, c, CB_FSYNR0, PTWF)
+#define GET_CB_FSYNR0_AFR(b, c) GET_CONTEXT_FIELD(b, c, CB_FSYNR0, AFR)
+#define GET_CB_FSYNR0_S1CBNDX(b, c) \
+	GET_CONTEXT_FIELD(b, c, CB_FSYNR0, S1CBNDX)
+
+/* Normal Memory Remap Register: CB_NMRR (VMSA inner/outer cacheability
+ * remap fields IR0-IR7 / OR0-OR7).
+ * Fix: GET accessors for OR6 and OR7 were missing even though the SET
+ * counterparts exist; they are added so the GET/SET sets are symmetric
+ * for all eight IRn/ORn fields. */
+#define SET_CB_NMRR_IR0(b, c, v) SET_CONTEXT_FIELD(b, c, CB_NMRR, IR0, v)
+#define SET_CB_NMRR_IR1(b, c, v) SET_CONTEXT_FIELD(b, c, CB_NMRR, IR1, v)
+#define SET_CB_NMRR_IR2(b, c, v) SET_CONTEXT_FIELD(b, c, CB_NMRR, IR2, v)
+#define SET_CB_NMRR_IR3(b, c, v) SET_CONTEXT_FIELD(b, c, CB_NMRR, IR3, v)
+#define SET_CB_NMRR_IR4(b, c, v) SET_CONTEXT_FIELD(b, c, CB_NMRR, IR4, v)
+#define SET_CB_NMRR_IR5(b, c, v) SET_CONTEXT_FIELD(b, c, CB_NMRR, IR5, v)
+#define SET_CB_NMRR_IR6(b, c, v) SET_CONTEXT_FIELD(b, c, CB_NMRR, IR6, v)
+#define SET_CB_NMRR_IR7(b, c, v) SET_CONTEXT_FIELD(b, c, CB_NMRR, IR7, v)
+#define SET_CB_NMRR_OR0(b, c, v) SET_CONTEXT_FIELD(b, c, CB_NMRR, OR0, v)
+#define SET_CB_NMRR_OR1(b, c, v) SET_CONTEXT_FIELD(b, c, CB_NMRR, OR1, v)
+#define SET_CB_NMRR_OR2(b, c, v) SET_CONTEXT_FIELD(b, c, CB_NMRR, OR2, v)
+#define SET_CB_NMRR_OR3(b, c, v) SET_CONTEXT_FIELD(b, c, CB_NMRR, OR3, v)
+#define SET_CB_NMRR_OR4(b, c, v) SET_CONTEXT_FIELD(b, c, CB_NMRR, OR4, v)
+#define SET_CB_NMRR_OR5(b, c, v) SET_CONTEXT_FIELD(b, c, CB_NMRR, OR5, v)
+#define SET_CB_NMRR_OR6(b, c, v) SET_CONTEXT_FIELD(b, c, CB_NMRR, OR6, v)
+#define SET_CB_NMRR_OR7(b, c, v) SET_CONTEXT_FIELD(b, c, CB_NMRR, OR7, v)
+
+#define GET_CB_NMRR_IR0(b, c) GET_CONTEXT_FIELD(b, c, CB_NMRR, IR0)
+#define GET_CB_NMRR_IR1(b, c) GET_CONTEXT_FIELD(b, c, CB_NMRR, IR1)
+#define GET_CB_NMRR_IR2(b, c) GET_CONTEXT_FIELD(b, c, CB_NMRR, IR2)
+#define GET_CB_NMRR_IR3(b, c) GET_CONTEXT_FIELD(b, c, CB_NMRR, IR3)
+#define GET_CB_NMRR_IR4(b, c) GET_CONTEXT_FIELD(b, c, CB_NMRR, IR4)
+#define GET_CB_NMRR_IR5(b, c) GET_CONTEXT_FIELD(b, c, CB_NMRR, IR5)
+#define GET_CB_NMRR_IR6(b, c) GET_CONTEXT_FIELD(b, c, CB_NMRR, IR6)
+#define GET_CB_NMRR_IR7(b, c) GET_CONTEXT_FIELD(b, c, CB_NMRR, IR7)
+#define GET_CB_NMRR_OR0(b, c) GET_CONTEXT_FIELD(b, c, CB_NMRR, OR0)
+#define GET_CB_NMRR_OR1(b, c) GET_CONTEXT_FIELD(b, c, CB_NMRR, OR1)
+#define GET_CB_NMRR_OR2(b, c) GET_CONTEXT_FIELD(b, c, CB_NMRR, OR2)
+#define GET_CB_NMRR_OR3(b, c) GET_CONTEXT_FIELD(b, c, CB_NMRR, OR3)
+#define GET_CB_NMRR_OR4(b, c) GET_CONTEXT_FIELD(b, c, CB_NMRR, OR4)
+#define GET_CB_NMRR_OR5(b, c) GET_CONTEXT_FIELD(b, c, CB_NMRR, OR5)
+#define GET_CB_NMRR_OR6(b, c) GET_CONTEXT_FIELD(b, c, CB_NMRR, OR6)
+#define GET_CB_NMRR_OR7(b, c) GET_CONTEXT_FIELD(b, c, CB_NMRR, OR7)
+
+/* Physical Address Register: CB_PAR -- result of an address-translation
+ * operation: either the output PA with attributes, or (when F is set)
+ * the fault syndrome.  b = base, c = context bank, v = value. */
+#define SET_CB_PAR_F(b, c, v) SET_CONTEXT_FIELD(b, c, CB_PAR, F, v)
+#define SET_CB_PAR_SS(b, c, v) SET_CONTEXT_FIELD(b, c, CB_PAR, SS, v)
+#define SET_CB_PAR_OUTER(b, c, v) SET_CONTEXT_FIELD(b, c, CB_PAR, OUTER, v)
+#define SET_CB_PAR_INNER(b, c, v) SET_CONTEXT_FIELD(b, c, CB_PAR, INNER, v)
+#define SET_CB_PAR_SH(b, c, v) SET_CONTEXT_FIELD(b, c, CB_PAR, SH, v)
+#define SET_CB_PAR_NS(b, c, v) SET_CONTEXT_FIELD(b, c, CB_PAR, NS, v)
+#define SET_CB_PAR_NOS(b, c, v) SET_CONTEXT_FIELD(b, c, CB_PAR, NOS, v)
+#define SET_CB_PAR_PA(b, c, v) SET_CONTEXT_FIELD(b, c, CB_PAR, PA, v)
+#define SET_CB_PAR_TF(b, c, v) SET_CONTEXT_FIELD(b, c, CB_PAR, TF, v)
+#define SET_CB_PAR_AFF(b, c, v) SET_CONTEXT_FIELD(b, c, CB_PAR, AFF, v)
+#define SET_CB_PAR_PF(b, c, v) SET_CONTEXT_FIELD(b, c, CB_PAR, PF, v)
+#define SET_CB_PAR_TLBMCF(b, c, v) SET_CONTEXT_FIELD(b, c, CB_PAR, TLBMCF, v)
+#define SET_CB_PAR_TLBLKF(b, c, v) SET_CONTEXT_FIELD(b, c, CB_PAR, TLBLKF, v)
+#define SET_CB_PAR_ATOT(b, c, v) SET_CONTEXT_FIELD(b, c, CB_PAR, ATOT, v)
+#define SET_CB_PAR_PLVL(b, c, v) SET_CONTEXT_FIELD(b, c, CB_PAR, PLVL, v)
+#define SET_CB_PAR_STAGE(b, c, v) SET_CONTEXT_FIELD(b, c, CB_PAR, STAGE, v)
+
+#define GET_CB_PAR_F(b, c) GET_CONTEXT_FIELD(b, c, CB_PAR, F)
+#define GET_CB_PAR_SS(b, c) GET_CONTEXT_FIELD(b, c, CB_PAR, SS)
+#define GET_CB_PAR_OUTER(b, c) GET_CONTEXT_FIELD(b, c, CB_PAR, OUTER)
+#define GET_CB_PAR_INNER(b, c) GET_CONTEXT_FIELD(b, c, CB_PAR, INNER)
+#define GET_CB_PAR_SH(b, c) GET_CONTEXT_FIELD(b, c, CB_PAR, SH)
+#define GET_CB_PAR_NS(b, c) GET_CONTEXT_FIELD(b, c, CB_PAR, NS)
+#define GET_CB_PAR_NOS(b, c) GET_CONTEXT_FIELD(b, c, CB_PAR, NOS)
+#define GET_CB_PAR_PA(b, c) GET_CONTEXT_FIELD(b, c, CB_PAR, PA)
+#define GET_CB_PAR_TF(b, c) GET_CONTEXT_FIELD(b, c, CB_PAR, TF)
+#define GET_CB_PAR_AFF(b, c) GET_CONTEXT_FIELD(b, c, CB_PAR, AFF)
+#define GET_CB_PAR_PF(b, c) GET_CONTEXT_FIELD(b, c, CB_PAR, PF)
+#define GET_CB_PAR_TLBMCF(b, c) GET_CONTEXT_FIELD(b, c, CB_PAR, TLBMCF)
+#define GET_CB_PAR_TLBLKF(b, c) GET_CONTEXT_FIELD(b, c, CB_PAR, TLBLKF)
+#define GET_CB_PAR_ATOT(b, c) GET_CONTEXT_FIELD(b, c, CB_PAR, ATOT)
+#define GET_CB_PAR_PLVL(b, c) GET_CONTEXT_FIELD(b, c, CB_PAR, PLVL)
+#define GET_CB_PAR_STAGE(b, c) GET_CONTEXT_FIELD(b, c, CB_PAR, STAGE)
+
+/* Primary Region Remap Register: CB_PRRR -- VMSA memory-type remap fields
+ * TR0-TR7 plus device/shareable/NS/not-outer-shareable control bits. */
+#define SET_CB_PRRR_TR0(b, c, v) SET_CONTEXT_FIELD(b, c, CB_PRRR, TR0, v)
+#define SET_CB_PRRR_TR1(b, c, v) SET_CONTEXT_FIELD(b, c, CB_PRRR, TR1, v)
+#define SET_CB_PRRR_TR2(b, c, v) SET_CONTEXT_FIELD(b, c, CB_PRRR, TR2, v)
+#define SET_CB_PRRR_TR3(b, c, v) SET_CONTEXT_FIELD(b, c, CB_PRRR, TR3, v)
+#define SET_CB_PRRR_TR4(b, c, v) SET_CONTEXT_FIELD(b, c, CB_PRRR, TR4, v)
+#define SET_CB_PRRR_TR5(b, c, v) SET_CONTEXT_FIELD(b, c, CB_PRRR, TR5, v)
+#define SET_CB_PRRR_TR6(b, c, v) SET_CONTEXT_FIELD(b, c, CB_PRRR, TR6, v)
+#define SET_CB_PRRR_TR7(b, c, v) SET_CONTEXT_FIELD(b, c, CB_PRRR, TR7, v)
+#define SET_CB_PRRR_DS0(b, c, v) SET_CONTEXT_FIELD(b, c, CB_PRRR, DS0, v)
+#define SET_CB_PRRR_DS1(b, c, v) SET_CONTEXT_FIELD(b, c, CB_PRRR, DS1, v)
+#define SET_CB_PRRR_NS0(b, c, v) SET_CONTEXT_FIELD(b, c, CB_PRRR, NS0, v)
+#define SET_CB_PRRR_NS1(b, c, v) SET_CONTEXT_FIELD(b, c, CB_PRRR, NS1, v)
+#define SET_CB_PRRR_NOS0(b, c, v) SET_CONTEXT_FIELD(b, c, CB_PRRR, NOS0, v)
+#define SET_CB_PRRR_NOS1(b, c, v) SET_CONTEXT_FIELD(b, c, CB_PRRR, NOS1, v)
+#define SET_CB_PRRR_NOS2(b, c, v) SET_CONTEXT_FIELD(b, c, CB_PRRR, NOS2, v)
+#define SET_CB_PRRR_NOS3(b, c, v) SET_CONTEXT_FIELD(b, c, CB_PRRR, NOS3, v)
+#define SET_CB_PRRR_NOS4(b, c, v) SET_CONTEXT_FIELD(b, c, CB_PRRR, NOS4, v)
+#define SET_CB_PRRR_NOS5(b, c, v) SET_CONTEXT_FIELD(b, c, CB_PRRR, NOS5, v)
+#define SET_CB_PRRR_NOS6(b, c, v) SET_CONTEXT_FIELD(b, c, CB_PRRR, NOS6, v)
+#define SET_CB_PRRR_NOS7(b, c, v) SET_CONTEXT_FIELD(b, c, CB_PRRR, NOS7, v)
+
+#define GET_CB_PRRR_TR0(b, c) GET_CONTEXT_FIELD(b, c, CB_PRRR, TR0)
+#define GET_CB_PRRR_TR1(b, c) GET_CONTEXT_FIELD(b, c, CB_PRRR, TR1)
+#define GET_CB_PRRR_TR2(b, c) GET_CONTEXT_FIELD(b, c, CB_PRRR, TR2)
+#define GET_CB_PRRR_TR3(b, c) GET_CONTEXT_FIELD(b, c, CB_PRRR, TR3)
+#define GET_CB_PRRR_TR4(b, c) GET_CONTEXT_FIELD(b, c, CB_PRRR, TR4)
+#define GET_CB_PRRR_TR5(b, c) GET_CONTEXT_FIELD(b, c, CB_PRRR, TR5)
+#define GET_CB_PRRR_TR6(b, c) GET_CONTEXT_FIELD(b, c, CB_PRRR, TR6)
+#define GET_CB_PRRR_TR7(b, c) GET_CONTEXT_FIELD(b, c, CB_PRRR, TR7)
+#define GET_CB_PRRR_DS0(b, c) GET_CONTEXT_FIELD(b, c, CB_PRRR, DS0)
+#define GET_CB_PRRR_DS1(b, c) GET_CONTEXT_FIELD(b, c, CB_PRRR, DS1)
+#define GET_CB_PRRR_NS0(b, c) GET_CONTEXT_FIELD(b, c, CB_PRRR, NS0)
+#define GET_CB_PRRR_NS1(b, c) GET_CONTEXT_FIELD(b, c, CB_PRRR, NS1)
+#define GET_CB_PRRR_NOS0(b, c) GET_CONTEXT_FIELD(b, c, CB_PRRR, NOS0)
+#define GET_CB_PRRR_NOS1(b, c) GET_CONTEXT_FIELD(b, c, CB_PRRR, NOS1)
+#define GET_CB_PRRR_NOS2(b, c) GET_CONTEXT_FIELD(b, c, CB_PRRR, NOS2)
+#define GET_CB_PRRR_NOS3(b, c) GET_CONTEXT_FIELD(b, c, CB_PRRR, NOS3)
+#define GET_CB_PRRR_NOS4(b, c) GET_CONTEXT_FIELD(b, c, CB_PRRR, NOS4)
+#define GET_CB_PRRR_NOS5(b, c) GET_CONTEXT_FIELD(b, c, CB_PRRR, NOS5)
+#define GET_CB_PRRR_NOS6(b, c) GET_CONTEXT_FIELD(b, c, CB_PRRR, NOS6)
+#define GET_CB_PRRR_NOS7(b, c) GET_CONTEXT_FIELD(b, c, CB_PRRR, NOS7)
+
+/* Transaction Resume: CB_RESUME -- TNR selects terminate vs. retry for a
+ * stalled faulting transaction. */
+#define SET_CB_RESUME_TNR(b, c, v) SET_CONTEXT_FIELD(b, c, CB_RESUME, TNR, v)
+
+#define GET_CB_RESUME_TNR(b, c) GET_CONTEXT_FIELD(b, c, CB_RESUME, TNR)
+
+/* System Control Register: CB_SCTLR -- per-context enable (M), fault
+ * reporting (CFRE/CFIE/CFCFG), TEX remap (TRE), access flag (AFE/AFFD)
+ * and the various *CFG attribute-override fields. */
+#define SET_CB_SCTLR_M(b, c, v) SET_CONTEXT_FIELD(b, c, CB_SCTLR, M, v)
+#define SET_CB_SCTLR_TRE(b, c, v) SET_CONTEXT_FIELD(b, c, CB_SCTLR, TRE, v)
+#define SET_CB_SCTLR_AFE(b, c, v) SET_CONTEXT_FIELD(b, c, CB_SCTLR, AFE, v)
+#define SET_CB_SCTLR_AFFD(b, c, v) SET_CONTEXT_FIELD(b, c, CB_SCTLR, AFFD, v)
+#define SET_CB_SCTLR_E(b, c, v) SET_CONTEXT_FIELD(b, c, CB_SCTLR, E, v)
+#define SET_CB_SCTLR_CFRE(b, c, v) SET_CONTEXT_FIELD(b, c, CB_SCTLR, CFRE, v)
+#define SET_CB_SCTLR_CFIE(b, c, v) SET_CONTEXT_FIELD(b, c, CB_SCTLR, CFIE, v)
+#define SET_CB_SCTLR_CFCFG(b, c, v) SET_CONTEXT_FIELD(b, c, CB_SCTLR, CFCFG, v)
+#define SET_CB_SCTLR_HUPCF(b, c, v) SET_CONTEXT_FIELD(b, c, CB_SCTLR, HUPCF, v)
+#define SET_CB_SCTLR_WXN(b, c, v) SET_CONTEXT_FIELD(b, c, CB_SCTLR, WXN, v)
+#define SET_CB_SCTLR_UWXN(b, c, v) SET_CONTEXT_FIELD(b, c, CB_SCTLR, UWXN, v)
+#define SET_CB_SCTLR_ASIDPNE(b, c, v) \
+	SET_CONTEXT_FIELD(b, c, CB_SCTLR, ASIDPNE, v)
+#define SET_CB_SCTLR_TRANSIENTCFG(b, c, v) \
+	SET_CONTEXT_FIELD(b, c, CB_SCTLR, TRANSIENTCFG, v)
+#define SET_CB_SCTLR_MEMATTR(b, c, v) \
+	SET_CONTEXT_FIELD(b, c, CB_SCTLR, MEMATTR, v)
+#define SET_CB_SCTLR_MTCFG(b, c, v) SET_CONTEXT_FIELD(b, c, CB_SCTLR, MTCFG, v)
+#define SET_CB_SCTLR_SHCFG(b, c, v) SET_CONTEXT_FIELD(b, c, CB_SCTLR, SHCFG, v)
+#define SET_CB_SCTLR_RACFG(b, c, v) SET_CONTEXT_FIELD(b, c, CB_SCTLR, RACFG, v)
+#define SET_CB_SCTLR_WACFG(b, c, v) SET_CONTEXT_FIELD(b, c, CB_SCTLR, WACFG, v)
+#define SET_CB_SCTLR_NSCFG(b, c, v) SET_CONTEXT_FIELD(b, c, CB_SCTLR, NSCFG, v)
+
+#define GET_CB_SCTLR_M(b, c) GET_CONTEXT_FIELD(b, c, CB_SCTLR, M)
+#define GET_CB_SCTLR_TRE(b, c) GET_CONTEXT_FIELD(b, c, CB_SCTLR, TRE)
+#define GET_CB_SCTLR_AFE(b, c) GET_CONTEXT_FIELD(b, c, CB_SCTLR, AFE)
+#define GET_CB_SCTLR_AFFD(b, c) GET_CONTEXT_FIELD(b, c, CB_SCTLR, AFFD)
+#define GET_CB_SCTLR_E(b, c) GET_CONTEXT_FIELD(b, c, CB_SCTLR, E)
+#define GET_CB_SCTLR_CFRE(b, c) GET_CONTEXT_FIELD(b, c, CB_SCTLR, CFRE)
+#define GET_CB_SCTLR_CFIE(b, c) GET_CONTEXT_FIELD(b, c, CB_SCTLR, CFIE)
+#define GET_CB_SCTLR_CFCFG(b, c) GET_CONTEXT_FIELD(b, c, CB_SCTLR, CFCFG)
+#define GET_CB_SCTLR_HUPCF(b, c) GET_CONTEXT_FIELD(b, c, CB_SCTLR, HUPCF)
+#define GET_CB_SCTLR_WXN(b, c) GET_CONTEXT_FIELD(b, c, CB_SCTLR, WXN)
+#define GET_CB_SCTLR_UWXN(b, c) GET_CONTEXT_FIELD(b, c, CB_SCTLR, UWXN)
+#define GET_CB_SCTLR_ASIDPNE(b, c) \
+	GET_CONTEXT_FIELD(b, c, CB_SCTLR, ASIDPNE)
+#define GET_CB_SCTLR_TRANSIENTCFG(b, c) \
+	GET_CONTEXT_FIELD(b, c, CB_SCTLR, TRANSIENTCFG)
+#define GET_CB_SCTLR_MEMATTR(b, c) \
+	GET_CONTEXT_FIELD(b, c, CB_SCTLR, MEMATTR)
+#define GET_CB_SCTLR_MTCFG(b, c) GET_CONTEXT_FIELD(b, c, CB_SCTLR, MTCFG)
+#define GET_CB_SCTLR_SHCFG(b, c) GET_CONTEXT_FIELD(b, c, CB_SCTLR, SHCFG)
+#define GET_CB_SCTLR_RACFG(b, c) GET_CONTEXT_FIELD(b, c, CB_SCTLR, RACFG)
+#define GET_CB_SCTLR_WACFG(b, c) GET_CONTEXT_FIELD(b, c, CB_SCTLR, WACFG)
+#define GET_CB_SCTLR_NSCFG(b, c) GET_CONTEXT_FIELD(b, c, CB_SCTLR, NSCFG)
+
+/* Per-context TLB maintenance operations (write-only invalidate
+ * registers plus the TLBSTATUS sync-active flag). */
+/* Invalidate TLB by ASID: CB_TLBIASID */
+#define SET_CB_TLBIASID_ASID(b, c, v) \
+	SET_CONTEXT_FIELD(b, c, CB_TLBIASID, ASID, v)
+
+#define GET_CB_TLBIASID_ASID(b, c) \
+	GET_CONTEXT_FIELD(b, c, CB_TLBIASID, ASID)
+
+/* Invalidate TLB by VA: CB_TLBIVA */
+#define SET_CB_TLBIVA_ASID(b, c, v) SET_CONTEXT_FIELD(b, c, CB_TLBIVA, ASID, v)
+#define SET_CB_TLBIVA_VA(b, c, v) SET_CONTEXT_FIELD(b, c, CB_TLBIVA, VA, v)
+
+#define GET_CB_TLBIVA_ASID(b, c) GET_CONTEXT_FIELD(b, c, CB_TLBIVA, ASID)
+#define GET_CB_TLBIVA_VA(b, c) GET_CONTEXT_FIELD(b, c, CB_TLBIVA, VA)
+
+/* Invalidate TLB by VA, All ASID: CB_TLBIVAA */
+#define SET_CB_TLBIVAA_VA(b, c, v) SET_CONTEXT_FIELD(b, c, CB_TLBIVAA, VA, v)
+
+#define GET_CB_TLBIVAA_VA(b, c) GET_CONTEXT_FIELD(b, c, CB_TLBIVAA, VA)
+
+/* Invalidate TLB by VA, All ASID, Last Level: CB_TLBIVAAL */
+#define SET_CB_TLBIVAAL_VA(b, c, v) SET_CONTEXT_FIELD(b, c, CB_TLBIVAAL, VA, v)
+
+#define GET_CB_TLBIVAAL_VA(b, c) GET_CONTEXT_FIELD(b, c, CB_TLBIVAAL, VA)
+
+/* Invalidate TLB by VA, Last Level: CB_TLBIVAL */
+#define SET_CB_TLBIVAL_ASID(b, c, v) \
+	SET_CONTEXT_FIELD(b, c, CB_TLBIVAL, ASID, v)
+#define SET_CB_TLBIVAL_VA(b, c, v) SET_CONTEXT_FIELD(b, c, CB_TLBIVAL, VA, v)
+
+#define GET_CB_TLBIVAL_ASID(b, c) \
+	GET_CONTEXT_FIELD(b, c, CB_TLBIVAL, ASID)
+#define GET_CB_TLBIVAL_VA(b, c) GET_CONTEXT_FIELD(b, c, CB_TLBIVAL, VA)
+
+/* TLB Status: CB_TLBSTATUS */
+#define SET_CB_TLBSTATUS_SACTIVE(b, c, v) \
+	SET_CONTEXT_FIELD(b, c, CB_TLBSTATUS, SACTIVE, v)
+
+#define GET_CB_TLBSTATUS_SACTIVE(b, c) \
+	GET_CONTEXT_FIELD(b, c, CB_TLBSTATUS, SACTIVE)
+
+/* Translation Table Base Control Register: CB_TTBCR */
+/* These are shared between VMSA and LPAE */
+/* EAE selects the extended (LPAE) translation-table format. */
+#define GET_CB_TTBCR_EAE(b, c) GET_CONTEXT_FIELD(b, c, CB_TTBCR, EAE)
+#define SET_CB_TTBCR_EAE(b, c, v) SET_CONTEXT_FIELD(b, c, CB_TTBCR, EAE, v)
+
+#define SET_CB_TTBCR_NSCFG0(b, c, v) \
+	SET_CONTEXT_FIELD(b, c, CB_TTBCR, NSCFG0, v)
+#define SET_CB_TTBCR_NSCFG1(b, c, v) \
+	SET_CONTEXT_FIELD(b, c, CB_TTBCR, NSCFG1, v)
+
+#define GET_CB_TTBCR_NSCFG0(b, c) \
+	GET_CONTEXT_FIELD(b, c, CB_TTBCR, NSCFG0)
+#define GET_CB_TTBCR_NSCFG1(b, c) \
+	GET_CONTEXT_FIELD(b, c, CB_TTBCR, NSCFG1)
+
+#ifdef CONFIG_IOMMU_LPAE
+
+/* LPAE format */
+
+/* Translation Table Base Register 0: CB_TTBR (64-bit, hence the _Q
+ * quad-word register accessors). */
+#define SET_TTBR0(b, c, v) SET_CTX_REG_Q(CB_TTBR0, (b), (c), (v))
+#define SET_TTBR1(b, c, v) SET_CTX_REG_Q(CB_TTBR1, (b), (c), (v))
+
+#define SET_CB_TTBR0_ASID(b, c, v) SET_CONTEXT_FIELD_Q(b, c, CB_TTBR0, ASID, v)
+#define SET_CB_TTBR0_ADDR(b, c, v) SET_CONTEXT_FIELD_Q(b, c, CB_TTBR0, ADDR, v)
+
+#define GET_CB_TTBR0_ASID(b, c) GET_CONTEXT_FIELD_Q(b, c, CB_TTBR0, ASID)
+#define GET_CB_TTBR0_ADDR(b, c) GET_CONTEXT_FIELD_Q(b, c, CB_TTBR0, ADDR)
+#define GET_CB_TTBR0(b, c) GET_CTX_REG_Q(CB_TTBR0, (b), (c))
+
+/* Translation Table Base Control Register: CB_TTBCR */
+#define SET_CB_TTBCR_T0SZ(b, c, v) SET_CONTEXT_FIELD(b, c, CB_TTBCR, T0SZ, v)
+#define SET_CB_TTBCR_T1SZ(b, c, v) SET_CONTEXT_FIELD(b, c, CB_TTBCR, T1SZ, v)
+#define SET_CB_TTBCR_EPD0(b, c, v) SET_CONTEXT_FIELD(b, c, CB_TTBCR, EPD0, v)
+#define SET_CB_TTBCR_EPD1(b, c, v) SET_CONTEXT_FIELD(b, c, CB_TTBCR, EPD1, v)
+#define SET_CB_TTBCR_IRGN0(b, c, v) SET_CONTEXT_FIELD(b, c, CB_TTBCR, IRGN0, v)
+#define SET_CB_TTBCR_IRGN1(b, c, v) SET_CONTEXT_FIELD(b, c, CB_TTBCR, IRGN1, v)
+#define SET_CB_TTBCR_ORGN0(b, c, v) SET_CONTEXT_FIELD(b, c, CB_TTBCR, ORGN0, v)
+#define SET_CB_TTBCR_ORGN1(b, c, v) SET_CONTEXT_FIELD(b, c, CB_TTBCR, ORGN1, v)
+/*
+ * Note: SET_CB_TTBCR_NSCFG0/1 were redundantly redefined here; they are
+ * already defined unconditionally in the shared VMSA/LPAE section above,
+ * so the token-identical duplicates have been dropped.
+ */
+
+#define SET_CB_TTBCR_SH0(b, c, v) SET_CONTEXT_FIELD(b, c, CB_TTBCR, SH0, v)
+#define SET_CB_TTBCR_SH1(b, c, v) SET_CONTEXT_FIELD(b, c, CB_TTBCR, SH1, v)
+#define SET_CB_TTBCR_A1(b, c, v) SET_CONTEXT_FIELD(b, c, CB_TTBCR, A1, v)
+
+#define GET_CB_TTBCR_T0SZ(b, c) GET_CONTEXT_FIELD(b, c, CB_TTBCR, T0SZ)
+#define GET_CB_TTBCR_T1SZ(b, c) GET_CONTEXT_FIELD(b, c, CB_TTBCR, T1SZ)
+#define GET_CB_TTBCR_EPD0(b, c) GET_CONTEXT_FIELD(b, c, CB_TTBCR, EPD0)
+#define GET_CB_TTBCR_EPD1(b, c) GET_CONTEXT_FIELD(b, c, CB_TTBCR, EPD1)
+/*
+ * Fix: the four GET accessors below previously took a spurious third
+ * parameter "v" (copy/paste from their SET counterparts); as getters they
+ * take only (base, context), and a normal two-argument call would not
+ * have compiled.
+ */
+#define GET_CB_TTBCR_IRGN0(b, c) GET_CONTEXT_FIELD(b, c, CB_TTBCR, IRGN0)
+#define GET_CB_TTBCR_IRGN1(b, c) GET_CONTEXT_FIELD(b, c, CB_TTBCR, IRGN1)
+#define GET_CB_TTBCR_ORGN0(b, c) GET_CONTEXT_FIELD(b, c, CB_TTBCR, ORGN0)
+#define GET_CB_TTBCR_ORGN1(b, c) GET_CONTEXT_FIELD(b, c, CB_TTBCR, ORGN1)
+
+/* Memory Attribute Indirection Registers (LPAE view of PRRR/NMRR). */
+#define SET_CB_MAIR0(b, c, v) SET_CTX_REG(CB_MAIR0, (b), (c), (v))
+#define SET_CB_MAIR1(b, c, v) SET_CTX_REG(CB_MAIR1, (b), (c), (v))
+
+#define GET_CB_MAIR0(b, c) GET_CTX_REG(CB_MAIR0, (b), (c))
+#define GET_CB_MAIR1(b, c) GET_CTX_REG(CB_MAIR1, (b), (c))
+#else
+/* VMSA (short-descriptor) format: 32-bit TTBRs with inline attributes. */
+#define SET_TTBR0(b, c, v) SET_CTX_REG(CB_TTBR0, (b), (c), (v))
+#define SET_TTBR1(b, c, v) SET_CTX_REG(CB_TTBR1, (b), (c), (v))
+
+#define SET_CB_TTBCR_PD0(b, c, v) SET_CONTEXT_FIELD(b, c, CB_TTBCR, PD0, v)
+#define SET_CB_TTBCR_PD1(b, c, v) SET_CONTEXT_FIELD(b, c, CB_TTBCR, PD1, v)
+
+#define SET_CB_TTBR0_IRGN1(b, c, v) SET_CONTEXT_FIELD(b, c, CB_TTBR0, IRGN1, v)
+#define SET_CB_TTBR0_S(b, c, v) SET_CONTEXT_FIELD(b, c, CB_TTBR0, S, v)
+#define SET_CB_TTBR0_RGN(b, c, v) SET_CONTEXT_FIELD(b, c, CB_TTBR0, RGN, v)
+#define SET_CB_TTBR0_NOS(b, c, v) SET_CONTEXT_FIELD(b, c, CB_TTBR0, NOS, v)
+#define SET_CB_TTBR0_IRGN0(b, c, v) SET_CONTEXT_FIELD(b, c, CB_TTBR0, IRGN0, v)
+#define SET_CB_TTBR0_ADDR(b, c, v) SET_CONTEXT_FIELD(b, c, CB_TTBR0, ADDR, v)
+
+#define GET_CB_TTBR0_IRGN1(b, c) GET_CONTEXT_FIELD(b, c, CB_TTBR0, IRGN1)
+#define GET_CB_TTBR0_S(b, c) GET_CONTEXT_FIELD(b, c, CB_TTBR0, S)
+#define GET_CB_TTBR0_RGN(b, c) GET_CONTEXT_FIELD(b, c, CB_TTBR0, RGN)
+#define GET_CB_TTBR0_NOS(b, c) GET_CONTEXT_FIELD(b, c, CB_TTBR0, NOS)
+#define GET_CB_TTBR0_IRGN0(b, c) GET_CONTEXT_FIELD(b, c, CB_TTBR0, IRGN0)
+#define GET_CB_TTBR0_ADDR(b, c) GET_CONTEXT_FIELD(b, c, CB_TTBR0, ADDR)
+#endif
+
+/* Translation Table Base Register 1: CB_TTBR1 */
+/*
+ * NOTE(review): the "_0S" accessors below use a digit zero but address the
+ * "S" (shareable) field -- almost certainly a typo for "_S", matching
+ * SET/GET_CB_TTBR0_S.  Left as-is because callers may already reference
+ * these spellings; confirm and rename file-wide if no users exist.
+ */
+#define SET_CB_TTBR1_IRGN1(b, c, v) SET_CONTEXT_FIELD(b, c, CB_TTBR1, IRGN1, v)
+#define SET_CB_TTBR1_0S(b, c, v) SET_CONTEXT_FIELD(b, c, CB_TTBR1, S, v)
+#define SET_CB_TTBR1_RGN(b, c, v) SET_CONTEXT_FIELD(b, c, CB_TTBR1, RGN, v)
+#define SET_CB_TTBR1_NOS(b, c, v) SET_CONTEXT_FIELD(b, c, CB_TTBR1, NOS, v)
+#define SET_CB_TTBR1_IRGN0(b, c, v) SET_CONTEXT_FIELD(b, c, CB_TTBR1, IRGN0, v)
+#define SET_CB_TTBR1_ADDR(b, c, v) SET_CONTEXT_FIELD(b, c, CB_TTBR1, ADDR, v)
+
+#define GET_CB_TTBR1_IRGN1(b, c) GET_CONTEXT_FIELD(b, c, CB_TTBR1, IRGN1)
+#define GET_CB_TTBR1_0S(b, c) GET_CONTEXT_FIELD(b, c, CB_TTBR1, S)
+#define GET_CB_TTBR1_RGN(b, c) GET_CONTEXT_FIELD(b, c, CB_TTBR1, RGN)
+#define GET_CB_TTBR1_NOS(b, c) GET_CONTEXT_FIELD(b, c, CB_TTBR1, NOS)
+#define GET_CB_TTBR1_IRGN0(b, c) GET_CONTEXT_FIELD(b, c, CB_TTBR1, IRGN0)
+#define GET_CB_TTBR1_ADDR(b, c) GET_CONTEXT_FIELD(b, c, CB_TTBR1, ADDR)
+
+/*
+ * Register byte offsets from the SMMU base.  Names ending in _N are the
+ * base offset of a per-index register array (indexed by stream/context/
+ * counter number by the *_FIELD_N accessor macros).
+ */
+/* Global Register Space 0 */
+#define CR0 (0x0000)
+#define SCR1 (0x0004)
+#define CR2 (0x0008)
+#define ACR (0x0010)
+#define IDR0 (0x0020)
+#define IDR1 (0x0024)
+#define IDR2 (0x0028)
+#define IDR7 (0x003C)
+#define GFAR (0x0040)
+#define GFSR (0x0048)
+#define GFSRRESTORE (0x004C)
+#define GFSYNR0 (0x0050)
+#define GFSYNR1 (0x0054)
+#define GFSYNR2 (0x0058)
+#define TLBIVMID (0x0064)
+#define TLBIALLNSNH (0x0068)
+#define TLBIALLH (0x006C)
+#define TLBGSYNC (0x0070)
+#define TLBGSTATUS (0x0074)
+#define TLBIVAH (0x0078)
+#define GATS1UR (0x0100)
+#define GATS1UW (0x0108)
+#define GATS1PR (0x0110)
+#define GATS1PW (0x0118)
+#define GATS12UR (0x0120)
+#define GATS12UW (0x0128)
+#define GATS12PR (0x0130)
+#define GATS12PW (0x0138)
+#define GPAR (0x0180)
+#define GATSR (0x0188)
+#define NSCR0 (0x0400)
+#define NSCR2 (0x0408)
+#define NSACR (0x0410)
+#define NSGFAR (0x0440)
+#define NSGFSRRESTORE (0x044C)
+#define SMR (0x0800)
+#define S2CR (0x0C00)
+
+/* SMMU_LOCAL */
+/* NOTE(review): same 0x2000 offset as MICRO_MMU_CTRL below -- presumably
+ * these live in different address spaces/base registers; confirm. */
+#define SMMU_INTR_SEL_NS (0x2000)
+
+/* Global Register Space 1 */
+#define CBAR (0x1000)
+#define CBFRSYNRA (0x1400)
+
+/* Implementation defined Register Space */
+#define MICRO_MMU_CTRL (0x2000)
+#define PREDICTIONDIS0 (0x204C)
+#define PREDICTIONDIS1 (0x2050)
+#define S1L1BFBLP0 (0x215C)
+
+/* Performance Monitoring Register Space */
+#define PMEVCNTR_N (0x3000)
+#define PMEVTYPER_N (0x3400)
+#define PMCGCR_N (0x3800)
+#define PMCGSMR_N (0x3A00)
+#define PMCNTENSET_N (0x3C00)
+#define PMCNTENCLR_N (0x3C20)
+#define PMINTENSET_N (0x3C40)
+#define PMINTENCLR_N (0x3C60)
+#define PMOVSCLR_N (0x3C80)
+#define PMOVSSET_N (0x3CC0)
+#define PMCFGR (0x3E00)
+#define PMCR (0x3E04)
+#define PMCEID0 (0x3E20)
+#define PMCEID1 (0x3E24)
+#define PMAUTHSTATUS (0x3FB8)
+#define PMDEVTYPE (0x3FCC)
+
+/* Secure Status Determination Address Space */
+#define SSDR_N (0x4000)
+
+/* Stage 1 Context Bank Format -- register byte offsets within a context
+ * bank.  Note that PRRR/MAIR0 (0x038) and NMRR/MAIR1 (0x03C) deliberately
+ * share offsets: the same register is PRRR/NMRR in the VMSA view and
+ * MAIR0/MAIR1 in the LPAE view (cf. the CONFIG_IOMMU_LPAE split above). */
+#define CB_SCTLR (0x000)
+#define CB_ACTLR (0x004)
+#define CB_RESUME (0x008)
+#define CB_TTBR0 (0x020)
+#define CB_TTBR1 (0x028)
+#define CB_TTBCR (0x030)
+#define CB_CONTEXTIDR (0x034)
+#define CB_PRRR (0x038)
+#define CB_MAIR0 (0x038)
+#define CB_NMRR (0x03C)
+#define CB_MAIR1 (0x03C)
+#define CB_PAR (0x050)
+#define CB_FSR (0x058)
+#define CB_FSRRESTORE (0x05C)
+#define CB_FAR (0x060)
+#define CB_FSYNR0 (0x068)
+#define CB_FSYNR1 (0x06C)
+#define CB_TLBIVA (0x600)
+#define CB_TLBIVAA (0x608)
+#define CB_TLBIASID (0x610)
+#define CB_TLBIALL (0x618)
+#define CB_TLBIVAL (0x620)
+#define CB_TLBIVAAL (0x628)
+#define CB_TLBSYNC (0x7F0)
+#define CB_TLBSTATUS (0x7F4)
+#define CB_ATS1PR (0x800)
+#define CB_ATS1PW (0x808)
+#define CB_ATS1UR (0x810)
+#define CB_ATS1UW (0x818)
+#define CB_ATSR (0x8F0)
+#define CB_PMXEVCNTR_N (0xE00)
+#define CB_PMXEVTYPER_N (0xE80)
+#define CB_PMCFGR (0xF00)
+#define CB_PMCR (0xF04)
+#define CB_PMCEID0 (0xF20)
+#define CB_PMCEID1 (0xF24)
+#define CB_PMCNTENSET (0xF40)
+#define CB_PMCNTENCLR (0xF44)
+#define CB_PMINTENSET (0xF48)
+#define CB_PMINTENCLR (0xF4C)
+#define CB_PMOVSCLR (0xF50)
+#define CB_PMOVSSET (0xF58)
+#define CB_PMAUTHSTATUS (0xFB8)
+
+/* Global Register Fields */
+/* Each macro below is the in-place bitfield mask, built from the
+ * corresponding *_MASK and *_SHIFT definitions elsewhere in this file. */
+/* Configuration Register: CR0 */
+#define CR0_NSCFG (CR0_NSCFG_MASK << CR0_NSCFG_SHIFT)
+#define CR0_WACFG (CR0_WACFG_MASK << CR0_WACFG_SHIFT)
+#define CR0_RACFG (CR0_RACFG_MASK << CR0_RACFG_SHIFT)
+#define CR0_SHCFG (CR0_SHCFG_MASK << CR0_SHCFG_SHIFT)
+#define CR0_SMCFCFG (CR0_SMCFCFG_MASK << CR0_SMCFCFG_SHIFT)
+#define CR0_MTCFG (CR0_MTCFG_MASK << CR0_MTCFG_SHIFT)
+#define CR0_MEMATTR (CR0_MEMATTR_MASK << CR0_MEMATTR_SHIFT)
+#define CR0_BSU (CR0_BSU_MASK << CR0_BSU_SHIFT)
+#define CR0_FB (CR0_FB_MASK << CR0_FB_SHIFT)
+#define CR0_PTM (CR0_PTM_MASK << CR0_PTM_SHIFT)
+#define CR0_VMIDPNE (CR0_VMIDPNE_MASK << CR0_VMIDPNE_SHIFT)
+#define CR0_USFCFG (CR0_USFCFG_MASK << CR0_USFCFG_SHIFT)
+#define CR0_GSE (CR0_GSE_MASK << CR0_GSE_SHIFT)
+#define CR0_STALLD (CR0_STALLD_MASK << CR0_STALLD_SHIFT)
+#define CR0_TRANSIENTCFG (CR0_TRANSIENTCFG_MASK << CR0_TRANSIENTCFG_SHIFT)
+#define CR0_GCFGFIE (CR0_GCFGFIE_MASK << CR0_GCFGFIE_SHIFT)
+#define CR0_GCFGFRE (CR0_GCFGFRE_MASK << CR0_GCFGFRE_SHIFT)
+#define CR0_GFIE (CR0_GFIE_MASK << CR0_GFIE_SHIFT)
+#define CR0_GFRE (CR0_GFRE_MASK << CR0_GFRE_SHIFT)
+#define CR0_CLIENTPD (CR0_CLIENTPD_MASK << CR0_CLIENTPD_SHIFT)
+
+/* Configuration Register: CR2 */
+#define CR2_BPVMID (CR2_BPVMID_MASK << CR2_BPVMID_SHIFT)
+
+/* Global address-translation operation registers: each carries the input
+ * address (ADDR) and a context-bank index (NDX). */
+/* Global Address Translation, Stage 1, Privileged Read: GATS1PR */
+#define GATS1PR_ADDR (GATS1PR_ADDR_MASK << GATS1PR_ADDR_SHIFT)
+#define GATS1PR_NDX (GATS1PR_NDX_MASK << GATS1PR_NDX_SHIFT)
+
+/* Global Address Translation, Stage 1, Privileged Write: GATS1PW */
+#define GATS1PW_ADDR (GATS1PW_ADDR_MASK << GATS1PW_ADDR_SHIFT)
+#define GATS1PW_NDX (GATS1PW_NDX_MASK << GATS1PW_NDX_SHIFT)
+
+/* Global Address Translation, Stage 1, User Read: GATS1UR */
+#define GATS1UR_ADDR (GATS1UR_ADDR_MASK << GATS1UR_ADDR_SHIFT)
+#define GATS1UR_NDX (GATS1UR_NDX_MASK << GATS1UR_NDX_SHIFT)
+
+/* Global Address Translation, Stage 1, User Write: GATS1UW */
+#define GATS1UW_ADDR (GATS1UW_ADDR_MASK << GATS1UW_ADDR_SHIFT)
+#define GATS1UW_NDX (GATS1UW_NDX_MASK << GATS1UW_NDX_SHIFT)
+
+/* Global Address Translation, Stage 1 and 2, Privileged Read: GATS12PR */
+#define GATS12PR_ADDR (GATS12PR_ADDR_MASK << GATS12PR_ADDR_SHIFT)
+#define GATS12PR_NDX (GATS12PR_NDX_MASK << GATS12PR_NDX_SHIFT)
+
+/* Global Address Translation, Stage 1 and 2, Privileged Write: GATS12PW */
+#define GATS12PW_ADDR (GATS12PW_ADDR_MASK << GATS12PW_ADDR_SHIFT)
+#define GATS12PW_NDX (GATS12PW_NDX_MASK << GATS12PW_NDX_SHIFT)
+
+/* Global Address Translation, Stage 1 and 2, User Read: GATS12UR */
+#define GATS12UR_ADDR (GATS12UR_ADDR_MASK << GATS12UR_ADDR_SHIFT)
+#define GATS12UR_NDX (GATS12UR_NDX_MASK << GATS12UR_NDX_SHIFT)
+
+/* Global Address Translation, Stage 1 and 2, User Write: GATS12UW */
+#define GATS12UW_ADDR (GATS12UW_ADDR_MASK << GATS12UW_ADDR_SHIFT)
+#define GATS12UW_NDX (GATS12UW_NDX_MASK << GATS12UW_NDX_SHIFT)
+
+/* Global Address Translation Status Register: GATSR */
+#define GATSR_ACTIVE (GATSR_ACTIVE_MASK << GATSR_ACTIVE_SHIFT)
+
+/* Global Fault Address Register: GFAR */
+#define GFAR_FADDR (GFAR_FADDR_MASK << GFAR_FADDR_SHIFT)
+
+/* Global Fault Status Register: GFSR -- one bit per global fault type
+ * (invalid context, unmatched stream, stream-match conflict, ...). */
+#define GFSR_ICF (GFSR_ICF_MASK << GFSR_ICF_SHIFT)
+#define GFSR_USF (GFSR_USF_MASK << GFSR_USF_SHIFT)
+#define GFSR_SMCF (GFSR_SMCF_MASK << GFSR_SMCF_SHIFT)
+#define GFSR_UCBF (GFSR_UCBF_MASK << GFSR_UCBF_SHIFT)
+#define GFSR_UCIF (GFSR_UCIF_MASK << GFSR_UCIF_SHIFT)
+#define GFSR_CAF (GFSR_CAF_MASK << GFSR_CAF_SHIFT)
+#define GFSR_EF (GFSR_EF_MASK << GFSR_EF_SHIFT)
+#define GFSR_PF (GFSR_PF_MASK << GFSR_PF_SHIFT)
+#define GFSR_MULTI (GFSR_MULTI_MASK << GFSR_MULTI_SHIFT)
+
+/* Global Fault Syndrome Register 0: GFSYNR0 */
+#define GFSYNR0_NESTED (GFSYNR0_NESTED_MASK << GFSYNR0_NESTED_SHIFT)
+#define GFSYNR0_WNR (GFSYNR0_WNR_MASK << GFSYNR0_WNR_SHIFT)
+#define GFSYNR0_PNU (GFSYNR0_PNU_MASK << GFSYNR0_PNU_SHIFT)
+#define GFSYNR0_IND (GFSYNR0_IND_MASK << GFSYNR0_IND_SHIFT)
+#define GFSYNR0_NSSTATE (GFSYNR0_NSSTATE_MASK << GFSYNR0_NSSTATE_SHIFT)
+#define GFSYNR0_NSATTR (GFSYNR0_NSATTR_MASK << GFSYNR0_NSATTR_SHIFT)
+
+/* Global Fault Syndrome Register 1: GFSYNR1 */
+#define GFSYNR1_SID (GFSYNR1_SID_MASK << GFSYNR1_SID_SHIFT)
+
+/* Global Physical Address Register: GPAR */
+#define GPAR_F (GPAR_F_MASK << GPAR_F_SHIFT)
+#define GPAR_SS (GPAR_SS_MASK << GPAR_SS_SHIFT)
+#define GPAR_OUTER (GPAR_OUTER_MASK << GPAR_OUTER_SHIFT)
+#define GPAR_INNER (GPAR_INNER_MASK << GPAR_INNER_SHIFT)
+#define GPAR_SH (GPAR_SH_MASK << GPAR_SH_SHIFT)
+#define GPAR_NS (GPAR_NS_MASK << GPAR_NS_SHIFT)
+#define GPAR_NOS (GPAR_NOS_MASK << GPAR_NOS_SHIFT)
+#define GPAR_PA (GPAR_PA_MASK << GPAR_PA_SHIFT)
+#define GPAR_TF (GPAR_TF_MASK << GPAR_TF_SHIFT)
+#define GPAR_AFF (GPAR_AFF_MASK << GPAR_AFF_SHIFT)
+#define GPAR_PF (GPAR_PF_MASK << GPAR_PF_SHIFT)
+#define GPAR_EF (GPAR_EF_MASK << GPAR_EF_SHIFT)
+/*
+ * NOTE(review): the mask operand says TLBMCF while the macro name and
+ * shift say TLCMCF -- one of the two spellings is a typo; confirm against
+ * the _MASK/_SHIFT definitions before renaming (callers may use either).
+ */
+#define GPAR_TLCMCF (GPAR_TLBMCF_MASK << GPAR_TLCMCF_SHIFT)
+#define GPAR_TLBLKF (GPAR_TLBLKF_MASK << GPAR_TLBLKF_SHIFT)
+/*
+ * Fix: the UCBF shift was spelled GFAR_UCBF_SHIFT; GFAR carries only a
+ * fault address (no UCBF field), and the mask operand on this line is
+ * GPAR_UCBF_MASK, so the GPAR_ prefix is clearly what was intended.
+ */
+#define GPAR_UCBF (GPAR_UCBF_MASK << GPAR_UCBF_SHIFT)
+
+/* Identification Register: IDR0 */
+/* NOTE(review): IDR0_NUMSMGR_SHIFT vs IDR0_NUMSMRG_MASK and
+ * IDR0_NUMIPRT_MASK vs IDR0_NUMIRPT_SHIFT each mix two spellings of the
+ * same field; one side of each pair is a typo -- confirm against the
+ * _MASK/_SHIFT definitions elsewhere in this file before renaming. */
+#define IDR0_NUMSMRG (IDR0_NUMSMRG_MASK << IDR0_NUMSMGR_SHIFT)
+#define IDR0_NUMSIDB (IDR0_NUMSIDB_MASK << IDR0_NUMSIDB_SHIFT)
+#define IDR0_BTM (IDR0_BTM_MASK << IDR0_BTM_SHIFT)
+#define IDR0_CTTW (IDR0_CTTW_MASK << IDR0_CTTW_SHIFT)
+#define IDR0_NUMIRPT (IDR0_NUMIPRT_MASK << IDR0_NUMIRPT_SHIFT)
+#define IDR0_PTFS (IDR0_PTFS_MASK << IDR0_PTFS_SHIFT)
+#define IDR0_SMS (IDR0_SMS_MASK << IDR0_SMS_SHIFT)
+#define IDR0_NTS (IDR0_NTS_MASK << IDR0_NTS_SHIFT)
+#define IDR0_S2TS (IDR0_S2TS_MASK << IDR0_S2TS_SHIFT)
+#define IDR0_S1TS (IDR0_S1TS_MASK << IDR0_S1TS_SHIFT)
+#define IDR0_SES (IDR0_SES_MASK << IDR0_SES_SHIFT)
+
+/* Identification Register: IDR1 */
+#define IDR1_NUMCB (IDR1_NUMCB_MASK << IDR1_NUMCB_SHIFT)
+#define IDR1_NUMSSDNDXB (IDR1_NUMSSDNDXB_MASK << IDR1_NUMSSDNDXB_SHIFT)
+#define IDR1_SSDTP (IDR1_SSDTP_MASK << IDR1_SSDTP_SHIFT)
+#define IDR1_SMCD (IDR1_SMCD_MASK << IDR1_SMCD_SHIFT)
+#define IDR1_NUMS2CB (IDR1_NUMS2CB_MASK << IDR1_NUMS2CB_SHIFT)
+#define IDR1_NUMPAGENDXB (IDR1_NUMPAGENDXB_MASK << IDR1_NUMPAGENDXB_SHIFT)
+#define IDR1_PAGESIZE (IDR1_PAGESIZE_MASK << IDR1_PAGESIZE_SHIFT)
+
+/* Identification Register: IDR2 */
+#define IDR2_IAS (IDR2_IAS_MASK << IDR2_IAS_SHIFT)
+/* NOTE(review): named IDR1_OAS but built from IDR2 mask/shift in the IDR2
+ * section -- looks like it should be IDR2_OAS; kept as-is in case callers
+ * already use this name.  Confirm and rename together with any users. */
+#define IDR1_OAS (IDR2_OAS_MASK << IDR2_OAS_SHIFT)
+
+/* Identification Register: IDR7 */
+#define IDR7_MINOR (IDR7_MINOR_MASK << IDR7_MINOR_SHIFT)
+#define IDR7_MAJOR (IDR7_MAJOR_MASK << IDR7_MAJOR_SHIFT)
+
+/* Stream to Context Register: S2CR */
+/*
+ * Fix: the CBNDX and SHCFG shift macros were spelled with stray
+ * lower-case letters ("S2cR_CBNDX_SHIFT", "s2CR_SHCFG_SHIFT"); every
+ * other mask/shift name in this file is upper-case, so the canonical
+ * S2CR_ spelling is used.
+ */
+#define S2CR_CBNDX (S2CR_CBNDX_MASK << S2CR_CBNDX_SHIFT)
+#define S2CR_SHCFG (S2CR_SHCFG_MASK << S2CR_SHCFG_SHIFT)
+#define S2CR_MTCFG (S2CR_MTCFG_MASK << S2CR_MTCFG_SHIFT)
+#define S2CR_MEMATTR (S2CR_MEMATTR_MASK << S2CR_MEMATTR_SHIFT)
+#define S2CR_TYPE (S2CR_TYPE_MASK << S2CR_TYPE_SHIFT)
+#define S2CR_NSCFG (S2CR_NSCFG_MASK << S2CR_NSCFG_SHIFT)
+#define S2CR_RACFG (S2CR_RACFG_MASK << S2CR_RACFG_SHIFT)
+#define S2CR_WACFG (S2CR_WACFG_MASK << S2CR_WACFG_SHIFT)
+#define S2CR_PRIVCFG (S2CR_PRIVCFG_MASK << S2CR_PRIVCFG_SHIFT)
+#define S2CR_INSTCFG (S2CR_INSTCFG_MASK << S2CR_INSTCFG_SHIFT)
+#define S2CR_TRANSIENTCFG (S2CR_TRANSIENTCFG_MASK << S2CR_TRANSIENTCFG_SHIFT)
+#define S2CR_VMID (S2CR_VMID_MASK << S2CR_VMID_SHIFT)
+#define S2CR_BSU (S2CR_BSU_MASK << S2CR_BSU_SHIFT)
+#define S2CR_FB (S2CR_FB_MASK << S2CR_FB_SHIFT)
+
+/* Stream Match Register: SMR */
+#define SMR_ID (SMR_ID_MASK << SMR_ID_SHIFT)
+#define SMR_MASK (SMR_MASK_MASK << SMR_MASK_SHIFT)
+#define SMR_VALID (SMR_VALID_MASK << SMR_VALID_SHIFT)
+
+/* Global TLB Status: TLBGSTATUS */
+#define TLBGSTATUS_GSACTIVE (TLBGSTATUS_GSACTIVE_MASK << \
+ TLBGSTATUS_GSACTIVE_SHIFT)
+/* Invalidate Hyp TLB by VA: TLBIVAH */
+#define TLBIVAH_ADDR (TLBIVAH_ADDR_MASK << TLBIVAH_ADDR_SHIFT)
+
+/* Invalidate TLB by VMID: TLBIVMID */
+#define TLBIVMID_VMID (TLBIVMID_VMID_MASK << TLBIVMID_VMID_SHIFT)
+
+/* Context Bank Attribute Register: CBAR */
+#define CBAR_VMID (CBAR_VMID_MASK << CBAR_VMID_SHIFT)
+#define CBAR_CBNDX (CBAR_CBNDX_MASK << CBAR_CBNDX_SHIFT)
+#define CBAR_BPSHCFG (CBAR_BPSHCFG_MASK << CBAR_BPSHCFG_SHIFT)
+#define CBAR_HYPC (CBAR_HYPC_MASK << CBAR_HYPC_SHIFT)
+#define CBAR_FB (CBAR_FB_MASK << CBAR_FB_SHIFT)
+#define CBAR_MEMATTR (CBAR_MEMATTR_MASK << CBAR_MEMATTR_SHIFT)
+#define CBAR_TYPE (CBAR_TYPE_MASK << CBAR_TYPE_SHIFT)
+#define CBAR_BSU (CBAR_BSU_MASK << CBAR_BSU_SHIFT)
+#define CBAR_RACFG (CBAR_RACFG_MASK << CBAR_RACFG_SHIFT)
+#define CBAR_WACFG (CBAR_WACFG_MASK << CBAR_WACFG_SHIFT)
+#define CBAR_IRPTNDX (CBAR_IRPTNDX_MASK << CBAR_IRPTNDX_SHIFT)
+
+/* Context Bank Fault Restricted Syndrome Register A: CBFRSYNRA */
+#define CBFRSYNRA_SID (CBFRSYNRA_SID_MASK << CBFRSYNRA_SID_SHIFT)
+
+/* Performance Monitoring Register Fields */
+
+/* Stage 1 Context Bank Format Fields */
+/* Auxiliary Control Register: CB_ACTLR */
+#define CB_ACTLR_REQPRIORITY \
+ (CB_ACTLR_REQPRIORITY_MASK << CB_ACTLR_REQPRIORITY_SHIFT)
+#define CB_ACTLR_REQPRIORITYCFG \
+ (CB_ACTLR_REQPRIORITYCFG_MASK << CB_ACTLR_REQPRIORITYCFG_SHIFT)
+#define CB_ACTLR_PRIVCFG (CB_ACTLR_PRIVCFG_MASK << CB_ACTLR_PRIVCFG_SHIFT)
+#define CB_ACTLR_BPRCOSH (CB_ACTLR_BPRCOSH_MASK << CB_ACTLR_BPRCOSH_SHIFT)
+#define CB_ACTLR_BPRCISH (CB_ACTLR_BPRCISH_MASK << CB_ACTLR_BPRCISH_SHIFT)
+#define CB_ACTLR_BPRCNSH (CB_ACTLR_BPRCNSH_MASK << CB_ACTLR_BPRCNSH_SHIFT)
+
+/* Address Translation, Stage 1, Privileged Read: CB_ATS1PR */
+#define CB_ATS1PR_ADDR (CB_ATS1PR_ADDR_MASK << CB_ATS1PR_ADDR_SHIFT)
+
+/* Address Translation, Stage 1, Privileged Write: CB_ATS1PW */
+#define CB_ATS1PW_ADDR (CB_ATS1PW_ADDR_MASK << CB_ATS1PW_ADDR_SHIFT)
+
+/* Address Translation, Stage 1, User Read: CB_ATS1UR */
+#define CB_ATS1UR_ADDR (CB_ATS1UR_ADDR_MASK << CB_ATS1UR_ADDR_SHIFT)
+
+/* Address Translation, Stage 1, User Write: CB_ATS1UW */
+#define CB_ATS1UW_ADDR (CB_ATS1UW_ADDR_MASK << CB_ATS1UW_ADDR_SHIFT)
+
+/* Address Translation Status Register: CB_ATSR */
+#define CB_ATSR_ACTIVE (CB_ATSR_ACTIVE_MASK << CB_ATSR_ACTIVE_SHIFT)
+
+/* Context ID Register: CB_CONTEXTIDR */
+#define CB_CONTEXTIDR_ASID (CB_CONTEXTIDR_ASID_MASK << \
+ CB_CONTEXTIDR_ASID_SHIFT)
+#define CB_CONTEXTIDR_PROCID (CB_CONTEXTIDR_PROCID_MASK << \
+ CB_CONTEXTIDR_PROCID_SHIFT)
+
+/* Fault Address Register: CB_FAR */
+#define CB_FAR_FADDR (CB_FAR_FADDR_MASK << CB_FAR_FADDR_SHIFT)
+
+/* Fault Status Register: CB_FSR */
+#define CB_FSR_TF (CB_FSR_TF_MASK << CB_FSR_TF_SHIFT)
+#define CB_FSR_AFF (CB_FSR_AFF_MASK << CB_FSR_AFF_SHIFT)
+#define CB_FSR_PF (CB_FSR_PF_MASK << CB_FSR_PF_SHIFT)
+#define CB_FSR_EF (CB_FSR_EF_MASK << CB_FSR_EF_SHIFT)
+#define CB_FSR_TLBMCF (CB_FSR_TLBMCF_MASK << CB_FSR_TLBMCF_SHIFT)
+#define CB_FSR_TLBLKF (CB_FSR_TLBLKF_MASK << CB_FSR_TLBLKF_SHIFT)
+#define CB_FSR_SS (CB_FSR_SS_MASK << CB_FSR_SS_SHIFT)
+#define CB_FSR_MULTI (CB_FSR_MULTI_MASK << CB_FSR_MULTI_SHIFT)
+
+/* Fault Syndrome Register 0: CB_FSYNR0 */
+#define CB_FSYNR0_PLVL (CB_FSYNR0_PLVL_MASK << CB_FSYNR0_PLVL_SHIFT)
+#define CB_FSYNR0_S1PTWF (CB_FSYNR0_S1PTWF_MASK << CB_FSYNR0_S1PTWF_SHIFT)
+#define CB_FSYNR0_WNR (CB_FSYNR0_WNR_MASK << CB_FSYNR0_WNR_SHIFT)
+#define CB_FSYNR0_PNU (CB_FSYNR0_PNU_MASK << CB_FSYNR0_PNU_SHIFT)
+#define CB_FSYNR0_IND (CB_FSYNR0_IND_MASK << CB_FSYNR0_IND_SHIFT)
+#define CB_FSYNR0_NSSTATE (CB_FSYNR0_NSSTATE_MASK << CB_FSYNR0_NSSTATE_SHIFT)
+#define CB_FSYNR0_NSATTR (CB_FSYNR0_NSATTR_MASK << CB_FSYNR0_NSATTR_SHIFT)
+#define CB_FSYNR0_ATOF (CB_FSYNR0_ATOF_MASK << CB_FSYNR0_ATOF_SHIFT)
+#define CB_FSYNR0_PTWF (CB_FSYNR0_PTWF_MASK << CB_FSYNR0_PTWF_SHIFT)
+#define CB_FSYNR0_AFR (CB_FSYNR0_AFR_MASK << CB_FSYNR0_AFR_SHIFT)
+#define CB_FSYNR0_S1CBNDX (CB_FSYNR0_S1CBNDX_MASK << CB_FSYNR0_S1CBNDX_SHIFT)
+
+/* Normal Memory Remap Register: CB_NMRR */
+#define CB_NMRR_IR0 (CB_NMRR_IR0_MASK << CB_NMRR_IR0_SHIFT)
+#define CB_NMRR_IR1 (CB_NMRR_IR1_MASK << CB_NMRR_IR1_SHIFT)
+#define CB_NMRR_IR2 (CB_NMRR_IR2_MASK << CB_NMRR_IR2_SHIFT)
+#define CB_NMRR_IR3 (CB_NMRR_IR3_MASK << CB_NMRR_IR3_SHIFT)
+#define CB_NMRR_IR4 (CB_NMRR_IR4_MASK << CB_NMRR_IR4_SHIFT)
+#define CB_NMRR_IR5 (CB_NMRR_IR5_MASK << CB_NMRR_IR5_SHIFT)
+#define CB_NMRR_IR6 (CB_NMRR_IR6_MASK << CB_NMRR_IR6_SHIFT)
+#define CB_NMRR_IR7 (CB_NMRR_IR7_MASK << CB_NMRR_IR7_SHIFT)
+#define CB_NMRR_OR0 (CB_NMRR_OR0_MASK << CB_NMRR_OR0_SHIFT)
+#define CB_NMRR_OR1 (CB_NMRR_OR1_MASK << CB_NMRR_OR1_SHIFT)
+#define CB_NMRR_OR2 (CB_NMRR_OR2_MASK << CB_NMRR_OR2_SHIFT)
+#define CB_NMRR_OR3 (CB_NMRR_OR3_MASK << CB_NMRR_OR3_SHIFT)
+#define CB_NMRR_OR4 (CB_NMRR_OR4_MASK << CB_NMRR_OR4_SHIFT)
+#define CB_NMRR_OR5 (CB_NMRR_OR5_MASK << CB_NMRR_OR5_SHIFT)
+#define CB_NMRR_OR6 (CB_NMRR_OR6_MASK << CB_NMRR_OR6_SHIFT)
+#define CB_NMRR_OR7 (CB_NMRR_OR7_MASK << CB_NMRR_OR7_SHIFT)
+
+/* Physical Address Register: CB_PAR */
+#define CB_PAR_F (CB_PAR_F_MASK << CB_PAR_F_SHIFT)
+#define CB_PAR_SS (CB_PAR_SS_MASK << CB_PAR_SS_SHIFT)
+#define CB_PAR_OUTER (CB_PAR_OUTER_MASK << CB_PAR_OUTER_SHIFT)
+#define CB_PAR_INNER (CB_PAR_INNER_MASK << CB_PAR_INNER_SHIFT)
+#define CB_PAR_SH (CB_PAR_SH_MASK << CB_PAR_SH_SHIFT)
+#define CB_PAR_NS (CB_PAR_NS_MASK << CB_PAR_NS_SHIFT)
+#define CB_PAR_NOS (CB_PAR_NOS_MASK << CB_PAR_NOS_SHIFT)
+#define CB_PAR_PA (CB_PAR_PA_MASK << CB_PAR_PA_SHIFT)
+#define CB_PAR_TF (CB_PAR_TF_MASK << CB_PAR_TF_SHIFT)
+#define CB_PAR_AFF (CB_PAR_AFF_MASK << CB_PAR_AFF_SHIFT)
+#define CB_PAR_PF (CB_PAR_PF_MASK << CB_PAR_PF_SHIFT)
+#define CB_PAR_EF (CB_PAR_EF_MASK << CB_PAR_EF_SHIFT)
+#define CB_PAR_TLBMCF (CB_PAR_TLBMCF_MASK << CB_PAR_TLBMCF_SHIFT)
+#define CB_PAR_TLBLKF (CB_PAR_TLBLKF_MASK << CB_PAR_TLBLKF_SHIFT)
+#define CB_PAR_ATOT (CB_PAR_ATOT_MASK << CB_PAR_ATOT_SHIFT)
+#define CB_PAR_PLVL (CB_PAR_PLVL_MASK << CB_PAR_PLVL_SHIFT)
+#define CB_PAR_STAGE (CB_PAR_STAGE_MASK << CB_PAR_STAGE_SHIFT)
+
+/* Primary Region Remap Register: CB_PRRR */
+#define CB_PRRR_TR0 (CB_PRRR_TR0_MASK << CB_PRRR_TR0_SHIFT)
+#define CB_PRRR_TR1 (CB_PRRR_TR1_MASK << CB_PRRR_TR1_SHIFT)
+#define CB_PRRR_TR2 (CB_PRRR_TR2_MASK << CB_PRRR_TR2_SHIFT)
+#define CB_PRRR_TR3 (CB_PRRR_TR3_MASK << CB_PRRR_TR3_SHIFT)
+#define CB_PRRR_TR4 (CB_PRRR_TR4_MASK << CB_PRRR_TR4_SHIFT)
+#define CB_PRRR_TR5 (CB_PRRR_TR5_MASK << CB_PRRR_TR5_SHIFT)
+#define CB_PRRR_TR6 (CB_PRRR_TR6_MASK << CB_PRRR_TR6_SHIFT)
+#define CB_PRRR_TR7 (CB_PRRR_TR7_MASK << CB_PRRR_TR7_SHIFT)
+#define CB_PRRR_DS0 (CB_PRRR_DS0_MASK << CB_PRRR_DS0_SHIFT)
+#define CB_PRRR_DS1 (CB_PRRR_DS1_MASK << CB_PRRR_DS1_SHIFT)
+#define CB_PRRR_NS0 (CB_PRRR_NS0_MASK << CB_PRRR_NS0_SHIFT)
+#define CB_PRRR_NS1 (CB_PRRR_NS1_MASK << CB_PRRR_NS1_SHIFT)
+#define CB_PRRR_NOS0 (CB_PRRR_NOS0_MASK << CB_PRRR_NOS0_SHIFT)
+#define CB_PRRR_NOS1 (CB_PRRR_NOS1_MASK << CB_PRRR_NOS1_SHIFT)
+#define CB_PRRR_NOS2 (CB_PRRR_NOS2_MASK << CB_PRRR_NOS2_SHIFT)
+#define CB_PRRR_NOS3 (CB_PRRR_NOS3_MASK << CB_PRRR_NOS3_SHIFT)
+#define CB_PRRR_NOS4 (CB_PRRR_NOS4_MASK << CB_PRRR_NOS4_SHIFT)
+#define CB_PRRR_NOS5 (CB_PRRR_NOS5_MASK << CB_PRRR_NOS5_SHIFT)
+#define CB_PRRR_NOS6 (CB_PRRR_NOS6_MASK << CB_PRRR_NOS6_SHIFT)
+#define CB_PRRR_NOS7 (CB_PRRR_NOS7_MASK << CB_PRRR_NOS7_SHIFT)
+
+/* Transaction Resume: CB_RESUME */
+#define CB_RESUME_TNR (CB_RESUME_TNR_MASK << CB_RESUME_TNR_SHIFT)
+
+/* System Control Register: CB_SCTLR */
+#define CB_SCTLR_M (CB_SCTLR_M_MASK << CB_SCTLR_M_SHIFT)
+#define CB_SCTLR_TRE (CB_SCTLR_TRE_MASK << CB_SCTLR_TRE_SHIFT)
+#define CB_SCTLR_AFE (CB_SCTLR_AFE_MASK << CB_SCTLR_AFE_SHIFT)
+#define CB_SCTLR_AFFD (CB_SCTLR_AFFD_MASK << CB_SCTLR_AFFD_SHIFT)
+#define CB_SCTLR_E (CB_SCTLR_E_MASK << CB_SCTLR_E_SHIFT)
+#define CB_SCTLR_CFRE (CB_SCTLR_CFRE_MASK << CB_SCTLR_CFRE_SHIFT)
+#define CB_SCTLR_CFIE (CB_SCTLR_CFIE_MASK << CB_SCTLR_CFIE_SHIFT)
+#define CB_SCTLR_CFCFG (CB_SCTLR_CFCFG_MASK << CB_SCTLR_CFCFG_SHIFT)
+#define CB_SCTLR_HUPCF (CB_SCTLR_HUPCF_MASK << CB_SCTLR_HUPCF_SHIFT)
+#define CB_SCTLR_WXN (CB_SCTLR_WXN_MASK << CB_SCTLR_WXN_SHIFT)
+#define CB_SCTLR_UWXN (CB_SCTLR_UWXN_MASK << CB_SCTLR_UWXN_SHIFT)
+#define CB_SCTLR_ASIDPNE (CB_SCTLR_ASIDPNE_MASK << CB_SCTLR_ASIDPNE_SHIFT)
+#define CB_SCTLR_TRANSIENTCFG (CB_SCTLR_TRANSIENTCFG_MASK << \
+ CB_SCTLR_TRANSIENTCFG_SHIFT)
+#define CB_SCTLR_MEMATTR (CB_SCTLR_MEMATTR_MASK << CB_SCTLR_MEMATTR_SHIFT)
+#define CB_SCTLR_MTCFG (CB_SCTLR_MTCFG_MASK << CB_SCTLR_MTCFG_SHIFT)
+#define CB_SCTLR_SHCFG (CB_SCTLR_SHCFG_MASK << CB_SCTLR_SHCFG_SHIFT)
+#define CB_SCTLR_RACFG (CB_SCTLR_RACFG_MASK << CB_SCTLR_RACFG_SHIFT)
+#define CB_SCTLR_WACFG (CB_SCTLR_WACFG_MASK << CB_SCTLR_WACFG_SHIFT)
+#define CB_SCTLR_NSCFG (CB_SCTLR_NSCFG_MASK << CB_SCTLR_NSCFG_SHIFT)
+
+/* Invalidate TLB by ASID: CB_TLBIASID */
+#define CB_TLBIASID_ASID (CB_TLBIASID_ASID_MASK << CB_TLBIASID_ASID_SHIFT)
+
+/* Invalidate TLB by VA: CB_TLBIVA */
+#define CB_TLBIVA_ASID (CB_TLBIVA_ASID_MASK << CB_TLBIVA_ASID_SHIFT)
+#define CB_TLBIVA_VA (CB_TLBIVA_VA_MASK << CB_TLBIVA_VA_SHIFT)
+
+/* Invalidate TLB by VA, All ASID: CB_TLBIVAA */
+#define CB_TLBIVAA_VA (CB_TLBIVAA_VA_MASK << CB_TLBIVAA_VA_SHIFT)
+
+/* Invalidate TLB by VA, All ASID, Last Level: CB_TLBIVAAL */
+#define CB_TLBIVAAL_VA (CB_TLBIVAAL_VA_MASK << CB_TLBIVAAL_VA_SHIFT)
+
+/* Invalidate TLB by VA, Last Level: CB_TLBIVAL */
+#define CB_TLBIVAL_ASID (CB_TLBIVAL_ASID_MASK << CB_TLBIVAL_ASID_SHIFT)
+#define CB_TLBIVAL_VA (CB_TLBIVAL_VA_MASK << CB_TLBIVAL_VA_SHIFT)
+
+/* TLB Status: CB_TLBSTATUS */
+#define CB_TLBSTATUS_SACTIVE (CB_TLBSTATUS_SACTIVE_MASK << \
+ CB_TLBSTATUS_SACTIVE_SHIFT)
+
+/* Translation Table Base Control Register: CB_TTBCR */
+#define CB_TTBCR_EAE (CB_TTBCR_EAE_MASK << CB_TTBCR_EAE_SHIFT)
+
+#define CB_TTBR0_ADDR (CB_TTBR0_ADDR_MASK << CB_TTBR0_ADDR_SHIFT)
+
+#ifdef CONFIG_IOMMU_LPAE
+/* Translation Table Base Register: CB_TTBR */
+#define CB_TTBR0_ASID (CB_TTBR0_ASID_MASK << CB_TTBR0_ASID_SHIFT)
+#define CB_TTBR1_ASID (CB_TTBR1_ASID_MASK << CB_TTBR1_ASID_SHIFT)
+
+/* Translation Table Base Control Register: CB_TTBCR */
+#define CB_TTBCR_T0SZ (CB_TTBCR_T0SZ_MASK << CB_TTBCR_T0SZ_SHIFT)
+#define CB_TTBCR_T1SZ (CB_TTBCR_T1SZ_MASK << CB_TTBCR_T1SZ_SHIFT)
+#define CB_TTBCR_EPD0 (CB_TTBCR_EPD0_MASK << CB_TTBCR_EPD0_SHIFT)
+#define CB_TTBCR_EPD1 (CB_TTBCR_EPD1_MASK << CB_TTBCR_EPD1_SHIFT)
+#define CB_TTBCR_IRGN0 (CB_TTBCR_IRGN0_MASK << CB_TTBCR_IRGN0_SHIFT)
+#define CB_TTBCR_IRGN1 (CB_TTBCR_IRGN1_MASK << CB_TTBCR_IRGN1_SHIFT)
+#define CB_TTBCR_ORGN0 (CB_TTBCR_ORGN0_MASK << CB_TTBCR_ORGN0_SHIFT)
+#define CB_TTBCR_ORGN1 (CB_TTBCR_ORGN1_MASK << CB_TTBCR_ORGN1_SHIFT)
+#define CB_TTBCR_NSCFG0 (CB_TTBCR_NSCFG0_MASK << CB_TTBCR_NSCFG0_SHIFT)
+#define CB_TTBCR_NSCFG1 (CB_TTBCR_NSCFG1_MASK << CB_TTBCR_NSCFG1_SHIFT)
+#define CB_TTBCR_SH0 (CB_TTBCR_SH0_MASK << CB_TTBCR_SH0_SHIFT)
+#define CB_TTBCR_SH1 (CB_TTBCR_SH1_MASK << CB_TTBCR_SH1_SHIFT)
+#define CB_TTBCR_A1 (CB_TTBCR_A1_MASK << CB_TTBCR_A1_SHIFT)
+
+#else
+
+/* Translation Table Base Register 0: CB_TTBR0 */
+#define CB_TTBR0_IRGN1 (CB_TTBR0_IRGN1_MASK << CB_TTBR0_IRGN1_SHIFT)
+#define CB_TTBR0_S (CB_TTBR0_S_MASK << CB_TTBR0_S_SHIFT)
+#define CB_TTBR0_RGN (CB_TTBR0_RGN_MASK << CB_TTBR0_RGN_SHIFT)
+#define CB_TTBR0_NOS (CB_TTBR0_NOS_MASK << CB_TTBR0_NOS_SHIFT)
+#define CB_TTBR0_IRGN0 (CB_TTBR0_IRGN0_MASK << CB_TTBR0_IRGN0_SHIFT)
+
+/* Translation Table Base Register 1: CB_TTBR1 */
+#define CB_TTBR1_IRGN1 (CB_TTBR1_IRGN1_MASK << CB_TTBR1_IRGN1_SHIFT)
+#define CB_TTBR1_S (CB_TTBR1_S_MASK << CB_TTBR1_S_SHIFT)
+#define CB_TTBR1_RGN (CB_TTBR1_RGN_MASK << CB_TTBR1_RGN_SHIFT)
+#define CB_TTBR1_NOS (CB_TTBR1_NOS_MASK << CB_TTBR1_NOS_SHIFT)
+#define CB_TTBR1_IRGN0 (CB_TTBR1_IRGN0_MASK << CB_TTBR1_IRGN0_SHIFT)
+#endif
+
+/* Global Register Masks */
+/* Configuration Register 0 */
+#define CR0_NSCFG_MASK 0x03
+#define CR0_WACFG_MASK 0x03
+#define CR0_RACFG_MASK 0x03
+#define CR0_SHCFG_MASK 0x03
+#define CR0_SMCFCFG_MASK 0x01
+#define NSCR0_SMCFCFG_MASK 0x01
+#define CR0_MTCFG_MASK 0x01
+#define CR0_MEMATTR_MASK 0x0F
+#define CR0_BSU_MASK 0x03
+#define CR0_FB_MASK 0x01
+#define CR0_PTM_MASK 0x01
+#define CR0_VMIDPNE_MASK 0x01
+#define CR0_USFCFG_MASK 0x01
+#define NSCR0_USFCFG_MASK 0x01
+#define CR0_GSE_MASK 0x01
+#define CR0_STALLD_MASK 0x01
+#define NSCR0_STALLD_MASK 0x01
+#define CR0_TRANSIENTCFG_MASK 0x03
+#define CR0_GCFGFIE_MASK 0x01
+#define NSCR0_GCFGFIE_MASK 0x01
+#define CR0_GCFGFRE_MASK 0x01
+#define NSCR0_GCFGFRE_MASK 0x01
+#define CR0_GFIE_MASK 0x01
+#define NSCR0_GFIE_MASK 0x01
+#define CR0_GFRE_MASK 0x01
+#define NSCR0_GFRE_MASK 0x01
+#define CR0_CLIENTPD_MASK 0x01
+#define NSCR0_CLIENTPD_MASK 0x01
+
+/* ACR */
+#define ACR_SMTNMC_BPTLBEN_MASK 0x01
+#define ACR_MMUDIS_BPTLBEN_MASK 0x01
+#define ACR_S2CR_BPTLBEN_MASK 0x01
+
+/* NSACR */
+#define NSACR_SMTNMC_BPTLBEN_MASK 0x01
+#define NSACR_MMUDIS_BPTLBEN_MASK 0x01
+#define NSACR_S2CR_BPTLBEN_MASK 0x01
+
+/* Configuration Register 2 */
+#define CR2_BPVMID_MASK 0xFF
+
+/* Global Address Translation, Stage 1, Privileged Read: GATS1PR */
+#define GATS1PR_ADDR_MASK 0xFFFFF
+#define GATS1PR_NDX_MASK 0xFF
+
+/* Global Address Translation, Stage 1, Privileged Write: GATS1PW */
+#define GATS1PW_ADDR_MASK 0xFFFFF
+#define GATS1PW_NDX_MASK 0xFF
+
+/* Global Address Translation, Stage 1, User Read: GATS1UR */
+#define GATS1UR_ADDR_MASK 0xFFFFF
+#define GATS1UR_NDX_MASK 0xFF
+
+/* Global Address Translation, Stage 1, User Write: GATS1UW */
+#define GATS1UW_ADDR_MASK 0xFFFFF
+#define GATS1UW_NDX_MASK 0xFF
+
+/* Global Address Translation, Stage 1 and 2, Privileged Read: GATS1PR */
+#define GATS12PR_ADDR_MASK 0xFFFFF
+#define GATS12PR_NDX_MASK 0xFF
+
+/* Global Address Translation, Stage 1 and 2, Privileged Write: GATS1PW */
+#define GATS12PW_ADDR_MASK 0xFFFFF
+#define GATS12PW_NDX_MASK 0xFF
+
+/* Global Address Translation, Stage 1 and 2, User Read: GATS1UR */
+#define GATS12UR_ADDR_MASK 0xFFFFF
+#define GATS12UR_NDX_MASK 0xFF
+
+/* Global Address Translation, Stage 1 and 2, User Write: GATS1UW */
+#define GATS12UW_ADDR_MASK 0xFFFFF
+#define GATS12UW_NDX_MASK 0xFF
+
+/* Global Address Translation Status Register: GATSR */
+#define GATSR_ACTIVE_MASK 0x01
+
+/* Global Fault Address Register: GFAR */
+#define GFAR_FADDR_MASK 0xFFFFFFFF
+
+/* Global Fault Status Register: GFSR */
+#define GFSR_ICF_MASK 0x01
+#define GFSR_USF_MASK 0x01
+#define GFSR_SMCF_MASK 0x01
+#define GFSR_UCBF_MASK 0x01
+#define GFSR_UCIF_MASK 0x01
+#define GFSR_CAF_MASK 0x01
+#define GFSR_EF_MASK 0x01
+#define GFSR_PF_MASK 0x01
+#define GFSR_MULTI_MASK 0x01
+
+/* Global Fault Syndrome Register 0: GFSYNR0 */
+#define GFSYNR0_NESTED_MASK 0x01
+#define GFSYNR0_WNR_MASK 0x01
+#define GFSYNR0_PNU_MASK 0x01
+#define GFSYNR0_IND_MASK 0x01
+#define GFSYNR0_NSSTATE_MASK 0x01
+#define GFSYNR0_NSATTR_MASK 0x01
+
+/* Global Fault Syndrome Register 1: GFSYNR1 */
+#define GFSYNR1_SID_MASK 0x7FFF
+/* Correctly cased name for the SSD index mask; the miscased original
+ * ("GFSYNr1") is kept below as an alias for backward compatibility.
+ */
+#define GFSYNR1_SSD_IDX_MASK 0x7FFF
+#define GFSYNr1_SSD_IDX_MASK GFSYNR1_SSD_IDX_MASK
+
+/* Global Physical Address Register: GPAR */
+#define GPAR_F_MASK 0x01
+#define GPAR_SS_MASK 0x01
+#define GPAR_OUTER_MASK 0x03
+#define GPAR_INNER_MASK 0x03
+#define GPAR_SH_MASK 0x01
+#define GPAR_NS_MASK 0x01
+#define GPAR_NOS_MASK 0x01
+#define GPAR_PA_MASK 0xFFFFF
+#define GPAR_TF_MASK 0x01
+#define GPAR_AFF_MASK 0x01
+#define GPAR_PF_MASK 0x01
+#define GPAR_EF_MASK 0x01
+#define GPAR_TLBMCF_MASK 0x01
+#define GPAR_TLBLKF_MASK 0x01
+#define GPAR_UCBF_MASK 0x01
+
+/* Identification Register: IDR0 */
+#define IDR0_NUMSMRG_MASK 0xFF
+#define IDR0_NUMSIDB_MASK 0x0F
+#define IDR0_BTM_MASK 0x01
+#define IDR0_CTTW_MASK 0x01
+#define IDR0_NUMIPRT_MASK 0xFF
+#define IDR0_PTFS_MASK 0x01
+#define IDR0_SMS_MASK 0x01
+#define IDR0_NTS_MASK 0x01
+#define IDR0_S2TS_MASK 0x01
+#define IDR0_S1TS_MASK 0x01
+#define IDR0_SES_MASK 0x01
+
+/* Identification Register: IDR1 */
+#define IDR1_NUMCB_MASK 0xFF
+#define IDR1_NUMSSDNDXB_MASK 0x0F
+#define IDR1_SSDTP_MASK 0x01
+#define IDR1_SMCD_MASK 0x01
+#define IDR1_NUMS2CB_MASK 0xFF
+#define IDR1_NUMPAGENDXB_MASK 0x07
+#define IDR1_PAGESIZE_MASK 0x01
+
+/* Identification Register: IDR2 */
+#define IDR2_IAS_MASK 0x0F
+#define IDR2_OAS_MASK 0x0F
+
+/* Identification Register: IDR7 */
+#define IDR7_MINOR_MASK 0x0F
+#define IDR7_MAJOR_MASK 0x0F
+
+/* Stream to Context Register: S2CR */
+#define S2CR_CBNDX_MASK 0xFF
+#define S2CR_SHCFG_MASK 0x03
+#define S2CR_MTCFG_MASK 0x01
+#define S2CR_MEMATTR_MASK 0x0F
+#define S2CR_TYPE_MASK 0x03
+#define S2CR_NSCFG_MASK 0x03
+#define S2CR_RACFG_MASK 0x03
+#define S2CR_WACFG_MASK 0x03
+#define S2CR_PRIVCFG_MASK 0x03
+#define S2CR_INSTCFG_MASK 0x03
+#define S2CR_TRANSIENTCFG_MASK 0x03
+#define S2CR_VMID_MASK 0xFF
+#define S2CR_BSU_MASK 0x03
+#define S2CR_FB_MASK 0x01
+
+/* Stream Match Register: SMR */
+#define SMR_ID_MASK 0x7FFF
+#define SMR_MASK_MASK 0x7FFF
+#define SMR_VALID_MASK 0x01
+
+/* Global TLB Status: TLBGSTATUS */
+#define TLBGSTATUS_GSACTIVE_MASK 0x01
+
+/* Invalidate Hyp TLB by VA: TLBIVAH */
+#define TLBIVAH_ADDR_MASK 0xFFFFF
+
+/* Invalidate TLB by VMID: TLBIVMID */
+#define TLBIVMID_VMID_MASK 0xFF
+
+/* Global Register Space 1 Mask */
+/* Context Bank Attribute Register: CBAR */
+#define CBAR_VMID_MASK 0xFF
+#define CBAR_CBNDX_MASK 0x03
+#define CBAR_BPSHCFG_MASK 0x03
+#define CBAR_HYPC_MASK 0x01
+#define CBAR_FB_MASK 0x01
+#define CBAR_MEMATTR_MASK 0x0F
+#define CBAR_TYPE_MASK 0x03
+#define CBAR_BSU_MASK 0x03
+#define CBAR_RACFG_MASK 0x03
+#define CBAR_WACFG_MASK 0x03
+#define CBAR_IRPTNDX_MASK 0xFF
+
+/* Context Bank Fault Restricted Syndrome Register A: CBFRSYNRA */
+#define CBFRSYNRA_SID_MASK 0x7FFF
+
+/* Implementation defined register space masks */
+#define MICRO_MMU_CTRL_RESERVED_MASK 0x03
+#define MICRO_MMU_CTRL_HALT_REQ_MASK 0x01
+#define MICRO_MMU_CTRL_IDLE_MASK 0x01
+
+/* Stage 1 Context Bank Format Masks */
+/* Auxiliary Control Register: CB_ACTLR */
+#define CB_ACTLR_REQPRIORITY_MASK 0x3
+#define CB_ACTLR_REQPRIORITYCFG_MASK 0x1
+#define CB_ACTLR_PRIVCFG_MASK 0x3
+#define CB_ACTLR_BPRCOSH_MASK 0x1
+#define CB_ACTLR_BPRCISH_MASK 0x1
+#define CB_ACTLR_BPRCNSH_MASK 0x1
+
+/* Address Translation, Stage 1, Privileged Read: CB_ATS1PR */
+#define CB_ATS1PR_ADDR_MASK 0xFFFFF
+
+/* Address Translation, Stage 1, Privileged Write: CB_ATS1PW */
+#define CB_ATS1PW_ADDR_MASK 0xFFFFF
+
+/* Address Translation, Stage 1, User Read: CB_ATS1UR */
+#define CB_ATS1UR_ADDR_MASK 0xFFFFF
+
+/* Address Translation, Stage 1, User Write: CB_ATS1UW */
+#define CB_ATS1UW_ADDR_MASK 0xFFFFF
+
+/* Address Translation Status Register: CB_ATSR */
+#define CB_ATSR_ACTIVE_MASK 0x01
+
+/* Context ID Register: CB_CONTEXTIDR */
+#define CB_CONTEXTIDR_ASID_MASK 0xFF
+#define CB_CONTEXTIDR_PROCID_MASK 0xFFFFFF
+
+/* Fault Address Register: CB_FAR */
+#define CB_FAR_FADDR_MASK 0xFFFFFFFF
+
+/* Fault Status Register: CB_FSR */
+#define CB_FSR_TF_MASK 0x01
+#define CB_FSR_AFF_MASK 0x01
+#define CB_FSR_PF_MASK 0x01
+#define CB_FSR_EF_MASK 0x01
+#define CB_FSR_TLBMCF_MASK 0x01
+#define CB_FSR_TLBLKF_MASK 0x01
+#define CB_FSR_SS_MASK 0x01
+#define CB_FSR_MULTI_MASK 0x01
+
+/* Fault Syndrome Register 0: CB_FSYNR0 */
+#define CB_FSYNR0_PLVL_MASK 0x03
+#define CB_FSYNR0_S1PTWF_MASK 0x01
+#define CB_FSYNR0_WNR_MASK 0x01
+#define CB_FSYNR0_PNU_MASK 0x01
+#define CB_FSYNR0_IND_MASK 0x01
+#define CB_FSYNR0_NSSTATE_MASK 0x01
+#define CB_FSYNR0_NSATTR_MASK 0x01
+#define CB_FSYNR0_ATOF_MASK 0x01
+#define CB_FSYNR0_PTWF_MASK 0x01
+#define CB_FSYNR0_AFR_MASK 0x01
+#define CB_FSYNR0_S1CBNDX_MASK 0xFF
+
+/* Normal Memory Remap Register: CB_NMRR */
+#define CB_NMRR_IR0_MASK 0x03
+#define CB_NMRR_IR1_MASK 0x03
+#define CB_NMRR_IR2_MASK 0x03
+#define CB_NMRR_IR3_MASK 0x03
+#define CB_NMRR_IR4_MASK 0x03
+#define CB_NMRR_IR5_MASK 0x03
+#define CB_NMRR_IR6_MASK 0x03
+#define CB_NMRR_IR7_MASK 0x03
+#define CB_NMRR_OR0_MASK 0x03
+#define CB_NMRR_OR1_MASK 0x03
+#define CB_NMRR_OR2_MASK 0x03
+#define CB_NMRR_OR3_MASK 0x03
+#define CB_NMRR_OR4_MASK 0x03
+#define CB_NMRR_OR5_MASK 0x03
+#define CB_NMRR_OR6_MASK 0x03
+#define CB_NMRR_OR7_MASK 0x03
+
+/* Physical Address Register: CB_PAR */
+#define CB_PAR_F_MASK 0x01
+#define CB_PAR_SS_MASK 0x01
+#define CB_PAR_OUTER_MASK 0x03
+#define CB_PAR_INNER_MASK 0x07
+#define CB_PAR_SH_MASK 0x01
+#define CB_PAR_NS_MASK 0x01
+#define CB_PAR_NOS_MASK 0x01
+#define CB_PAR_PA_MASK 0xFFFFF
+#define CB_PAR_TF_MASK 0x01
+#define CB_PAR_AFF_MASK 0x01
+#define CB_PAR_PF_MASK 0x01
+#define CB_PAR_EF_MASK 0x01
+#define CB_PAR_TLBMCF_MASK 0x01
+#define CB_PAR_TLBLKF_MASK 0x01
+#define CB_PAR_ATOT_MASK 0x01ULL
+#define CB_PAR_PLVL_MASK 0x03ULL
+#define CB_PAR_STAGE_MASK 0x01ULL
+
+/* Primary Region Remap Register: CB_PRRR */
+#define CB_PRRR_TR0_MASK 0x03
+#define CB_PRRR_TR1_MASK 0x03
+#define CB_PRRR_TR2_MASK 0x03
+#define CB_PRRR_TR3_MASK 0x03
+#define CB_PRRR_TR4_MASK 0x03
+#define CB_PRRR_TR5_MASK 0x03
+#define CB_PRRR_TR6_MASK 0x03
+#define CB_PRRR_TR7_MASK 0x03
+#define CB_PRRR_DS0_MASK 0x01
+#define CB_PRRR_DS1_MASK 0x01
+#define CB_PRRR_NS0_MASK 0x01
+#define CB_PRRR_NS1_MASK 0x01
+#define CB_PRRR_NOS0_MASK 0x01
+#define CB_PRRR_NOS1_MASK 0x01
+#define CB_PRRR_NOS2_MASK 0x01
+#define CB_PRRR_NOS3_MASK 0x01
+#define CB_PRRR_NOS4_MASK 0x01
+#define CB_PRRR_NOS5_MASK 0x01
+#define CB_PRRR_NOS6_MASK 0x01
+#define CB_PRRR_NOS7_MASK 0x01
+
+/* Transaction Resume: CB_RESUME */
+#define CB_RESUME_TNR_MASK 0x01
+
+/* System Control Register: CB_SCTLR */
+#define CB_SCTLR_M_MASK 0x01
+#define CB_SCTLR_TRE_MASK 0x01
+#define CB_SCTLR_AFE_MASK 0x01
+#define CB_SCTLR_AFFD_MASK 0x01
+#define CB_SCTLR_E_MASK 0x01
+#define CB_SCTLR_CFRE_MASK 0x01
+#define CB_SCTLR_CFIE_MASK 0x01
+#define CB_SCTLR_CFCFG_MASK 0x01
+#define CB_SCTLR_HUPCF_MASK 0x01
+#define CB_SCTLR_WXN_MASK 0x01
+#define CB_SCTLR_UWXN_MASK 0x01
+#define CB_SCTLR_ASIDPNE_MASK 0x01
+#define CB_SCTLR_TRANSIENTCFG_MASK 0x03
+#define CB_SCTLR_MEMATTR_MASK 0x0F
+#define CB_SCTLR_MTCFG_MASK 0x01
+#define CB_SCTLR_SHCFG_MASK 0x03
+#define CB_SCTLR_RACFG_MASK 0x03
+#define CB_SCTLR_WACFG_MASK 0x03
+#define CB_SCTLR_NSCFG_MASK 0x03
+
+/* Invalidate TLB by ASID: CB_TLBIASID */
+#define CB_TLBIASID_ASID_MASK 0xFF
+
+/* Invalidate TLB by VA: CB_TLBIVA */
+#define CB_TLBIVA_ASID_MASK 0xFF
+#define CB_TLBIVA_VA_MASK 0xFFFFF
+
+/* Invalidate TLB by VA, All ASID: CB_TLBIVAA */
+#define CB_TLBIVAA_VA_MASK 0xFFFFF
+
+/* Invalidate TLB by VA, All ASID, Last Level: CB_TLBIVAAL */
+#define CB_TLBIVAAL_VA_MASK 0xFFFFF
+
+/* Invalidate TLB by VA, Last Level: CB_TLBIVAL */
+#define CB_TLBIVAL_ASID_MASK 0xFF
+#define CB_TLBIVAL_VA_MASK 0xFFFFF
+
+/* TLB Status: CB_TLBSTATUS */
+#define CB_TLBSTATUS_SACTIVE_MASK 0x01
+
+/* Translation Table Base Control Register: CB_TTBCR */
+#define CB_TTBCR_T0SZ_MASK 0x07
+#define CB_TTBCR_T1SZ_MASK 0x07
+#define CB_TTBCR_EPD0_MASK 0x01
+#define CB_TTBCR_EPD1_MASK 0x01
+#define CB_TTBCR_IRGN0_MASK 0x03
+#define CB_TTBCR_IRGN1_MASK 0x03
+#define CB_TTBCR_ORGN0_MASK 0x03
+#define CB_TTBCR_ORGN1_MASK 0x03
+#define CB_TTBCR_NSCFG0_MASK 0x01
+#define CB_TTBCR_NSCFG1_MASK 0x01
+#define CB_TTBCR_SH0_MASK 0x03
+#define CB_TTBCR_SH1_MASK 0x03
+#define CB_TTBCR_A1_MASK 0x01
+#define CB_TTBCR_EAE_MASK 0x01
+
+/* Translation Table Base Register 0/1: CB_TTBR */
+#ifdef CONFIG_IOMMU_LPAE
+#define CB_TTBR0_ADDR_MASK 0x7FFFFFFFFULL
+#define CB_TTBR0_ASID_MASK 0xFF
+#define CB_TTBR1_ASID_MASK 0xFF
+#else
+#define CB_TTBR0_IRGN1_MASK 0x01
+#define CB_TTBR0_S_MASK 0x01
+#define CB_TTBR0_RGN_MASK 0x01
+#define CB_TTBR0_NOS_MASK 0x01
+#define CB_TTBR0_IRGN0_MASK 0x01
+#define CB_TTBR0_ADDR_MASK 0xFFFFFF
+
+/* Fixed: the TTBR1 literals were spelled inconsistently ("0x1"/"0X1");
+ * normalized to the 0x01 form used throughout this file.  Values are
+ * unchanged.
+ */
+#define CB_TTBR1_IRGN1_MASK 0x01
+#define CB_TTBR1_S_MASK 0x01
+#define CB_TTBR1_RGN_MASK 0x01
+#define CB_TTBR1_NOS_MASK 0x01
+#define CB_TTBR1_IRGN0_MASK 0x01
+#endif
+
+/* Global Register Shifts */
+/* Configuration Register: CR0 */
+#define CR0_NSCFG_SHIFT 28
+#define CR0_WACFG_SHIFT 26
+#define CR0_RACFG_SHIFT 24
+#define CR0_SHCFG_SHIFT 22
+#define CR0_SMCFCFG_SHIFT 21
+#define NSCR0_SMCFCFG_SHIFT 21
+#define CR0_MTCFG_SHIFT 20
+#define CR0_MEMATTR_SHIFT 16
+#define CR0_BSU_SHIFT 14
+#define CR0_FB_SHIFT 13
+#define CR0_PTM_SHIFT 12
+#define CR0_VMIDPNE_SHIFT 11
+#define CR0_USFCFG_SHIFT 10
+#define NSCR0_USFCFG_SHIFT 10
+#define CR0_GSE_SHIFT 9
+#define CR0_STALLD_SHIFT 8
+#define NSCR0_STALLD_SHIFT 8
+#define CR0_TRANSIENTCFG_SHIFT 6
+#define CR0_GCFGFIE_SHIFT 5
+#define NSCR0_GCFGFIE_SHIFT 5
+#define CR0_GCFGFRE_SHIFT 4
+#define NSCR0_GCFGFRE_SHIFT 4
+#define CR0_GFIE_SHIFT 2
+#define NSCR0_GFIE_SHIFT 2
+#define CR0_GFRE_SHIFT 1
+#define NSCR0_GFRE_SHIFT 1
+#define CR0_CLIENTPD_SHIFT 0
+#define NSCR0_CLIENTPD_SHIFT 0
+
+/* ACR */
+#define ACR_SMTNMC_BPTLBEN_SHIFT 8
+#define ACR_MMUDIS_BPTLBEN_SHIFT 9
+#define ACR_S2CR_BPTLBEN_SHIFT 10
+
+/* NSACR */
+#define NSACR_SMTNMC_BPTLBEN_SHIFT 8
+#define NSACR_MMUDIS_BPTLBEN_SHIFT 9
+#define NSACR_S2CR_BPTLBEN_SHIFT 10
+
+/* Configuration Register: CR2 */
+#define CR2_BPVMID_SHIFT 0
+
+/* Global Address Translation, Stage 1, Privileged Read: GATS1PR */
+#define GATS1PR_ADDR_SHIFT 12
+#define GATS1PR_NDX_SHIFT 0
+
+/* Global Address Translation, Stage 1, Privileged Write: GATS1PW */
+#define GATS1PW_ADDR_SHIFT 12
+#define GATS1PW_NDX_SHIFT 0
+
+/* Global Address Translation, Stage 1, User Read: GATS1UR */
+#define GATS1UR_ADDR_SHIFT 12
+#define GATS1UR_NDX_SHIFT 0
+
+/* Global Address Translation, Stage 1, User Write: GATS1UW */
+#define GATS1UW_ADDR_SHIFT 12
+#define GATS1UW_NDX_SHIFT 0
+
+/* Global Address Translation, Stage 1 and 2, Privileged Read: GATS12PR */
+#define GATS12PR_ADDR_SHIFT 12
+#define GATS12PR_NDX_SHIFT 0
+
+/* Global Address Translation, Stage 1 and 2, Privileged Write: GATS12PW */
+#define GATS12PW_ADDR_SHIFT 12
+#define GATS12PW_NDX_SHIFT 0
+
+/* Global Address Translation, Stage 1 and 2, User Read: GATS12UR */
+#define GATS12UR_ADDR_SHIFT 12
+#define GATS12UR_NDX_SHIFT 0
+
+/* Global Address Translation, Stage 1 and 2, User Write: GATS12UW */
+#define GATS12UW_ADDR_SHIFT 12
+#define GATS12UW_NDX_SHIFT 0
+
+/* Global Address Translation Status Register: GATSR */
+#define GATSR_ACTIVE_SHIFT 0
+
+/* Global Fault Address Register: GFAR */
+#define GFAR_FADDR_SHIFT 0
+
+/* Global Fault Status Register: GFSR */
+#define GFSR_ICF_SHIFT 0
+#define GFSR_USF_SHIFT 1
+#define GFSR_SMCF_SHIFT 2
+#define GFSR_UCBF_SHIFT 3
+#define GFSR_UCIF_SHIFT 4
+#define GFSR_CAF_SHIFT 5
+#define GFSR_EF_SHIFT 6
+#define GFSR_PF_SHIFT 7
+#define GFSR_MULTI_SHIFT 31
+
+/* Global Fault Syndrome Register 0: GFSYNR0 */
+#define GFSYNR0_NESTED_SHIFT 0
+#define GFSYNR0_WNR_SHIFT 1
+#define GFSYNR0_PNU_SHIFT 2
+#define GFSYNR0_IND_SHIFT 3
+#define GFSYNR0_NSSTATE_SHIFT 4
+#define GFSYNR0_NSATTR_SHIFT 5
+
+/* Global Fault Syndrome Register 1: GFSYNR1 */
+#define GFSYNR1_SID_SHIFT 0
+
+/* Global Physical Address Register: GPAR */
+#define GPAR_F_SHIFT 0
+#define GPAR_SS_SHIFT 1
+#define GPAR_OUTER_SHIFT 2
+#define GPAR_INNER_SHIFT 4
+#define GPAR_SH_SHIFT 7
+#define GPAR_NS_SHIFT 9
+#define GPAR_NOS_SHIFT 10
+#define GPAR_PA_SHIFT 12
+#define GPAR_TF_SHIFT 1
+#define GPAR_AFF_SHIFT 2
+#define GPAR_PF_SHIFT 3
+#define GPAR_EF_SHIFT 4
+#define GPAR_TLCMCF_SHIFT 5
+#define GPAR_TLBLKF_SHIFT 6
+#define GFAR_UCBF_SHIFT 30
+
+/* Identification Register: IDR0 */
+#define IDR0_NUMSMRG_SHIFT 0
+#define IDR0_NUMSIDB_SHIFT 9
+#define IDR0_BTM_SHIFT 13
+#define IDR0_CTTW_SHIFT 14
+#define IDR0_NUMIRPT_SHIFT 16
+#define IDR0_PTFS_SHIFT 24
+#define IDR0_SMS_SHIFT 27
+#define IDR0_NTS_SHIFT 28
+#define IDR0_S2TS_SHIFT 29
+#define IDR0_S1TS_SHIFT 30
+#define IDR0_SES_SHIFT 31
+
+/* Identification Register: IDR1 */
+#define IDR1_NUMCB_SHIFT 0
+#define IDR1_NUMSSDNDXB_SHIFT 8
+#define IDR1_SSDTP_SHIFT 12
+#define IDR1_SMCD_SHIFT 15
+#define IDR1_NUMS2CB_SHIFT 16
+#define IDR1_NUMPAGENDXB_SHIFT 28
+#define IDR1_PAGESIZE_SHIFT 31
+
+/* Identification Register: IDR2 */
+#define IDR2_IAS_SHIFT 0
+#define IDR2_OAS_SHIFT 4
+
+/* Identification Register: IDR7 */
+#define IDR7_MINOR_SHIFT 0
+#define IDR7_MAJOR_SHIFT 4
+
+/* Stream to Context Register: S2CR */
+#define S2CR_CBNDX_SHIFT 0
+/* Correctly cased name for the SHCFG shift; the miscased "s2CR_" spelling
+ * is kept below because existing users reference it.
+ */
+#define S2CR_SHCFG_SHIFT 8
+#define s2CR_SHCFG_SHIFT S2CR_SHCFG_SHIFT
+#define S2CR_MTCFG_SHIFT 11
+#define S2CR_MEMATTR_SHIFT 12
+#define S2CR_TYPE_SHIFT 16
+#define S2CR_NSCFG_SHIFT 18
+#define S2CR_RACFG_SHIFT 20
+#define S2CR_WACFG_SHIFT 22
+#define S2CR_PRIVCFG_SHIFT 24
+#define S2CR_INSTCFG_SHIFT 26
+#define S2CR_TRANSIENTCFG_SHIFT 28
+/* NOTE(review): the VMID/BSU/FB shifts below overlap the CBNDX/PRIVCFG/
+ * INSTCFG positions above — presumably these belong to an alternate
+ * (bypass/fault) S2CR register format; confirm against the SMMU spec.
+ */
+#define S2CR_VMID_SHIFT 0
+#define S2CR_BSU_SHIFT 24
+#define S2CR_FB_SHIFT 26
+
+/* Stream Match Register: SMR */
+#define SMR_ID_SHIFT 0
+#define SMR_MASK_SHIFT 16
+#define SMR_VALID_SHIFT 31
+
+/* Global TLB Status: TLBGSTATUS */
+#define TLBGSTATUS_GSACTIVE_SHIFT 0
+
+/* Invalidate Hyp TLB by VA: TLBIVAH */
+#define TLBIVAH_ADDR_SHIFT 12
+
+/* Invalidate TLB by VMID: TLBIVMID */
+#define TLBIVMID_VMID_SHIFT 0
+
+/* Context Bank Attribute Register: CBAR */
+#define CBAR_VMID_SHIFT 0
+#define CBAR_CBNDX_SHIFT 8
+#define CBAR_BPSHCFG_SHIFT 8
+#define CBAR_HYPC_SHIFT 10
+#define CBAR_FB_SHIFT 11
+#define CBAR_MEMATTR_SHIFT 12
+#define CBAR_TYPE_SHIFT 16
+#define CBAR_BSU_SHIFT 18
+#define CBAR_RACFG_SHIFT 20
+#define CBAR_WACFG_SHIFT 22
+#define CBAR_IRPTNDX_SHIFT 24
+
+/* Context Bank Fault Restricted Syndrome Register A: CBFRSYNRA */
+#define CBFRSYNRA_SID_SHIFT 0
+
+/* Implementation defined register space shift */
+#define MICRO_MMU_CTRL_RESERVED_SHIFT 0x00
+#define MICRO_MMU_CTRL_HALT_REQ_SHIFT 0x02
+#define MICRO_MMU_CTRL_IDLE_SHIFT 0x03
+
+/* Stage 1 Context Bank Format Shifts */
+/* Auxiliary Control Register: CB_ACTLR */
+#define CB_ACTLR_REQPRIORITY_SHIFT 0
+#define CB_ACTLR_REQPRIORITYCFG_SHIFT 4
+#define CB_ACTLR_PRIVCFG_SHIFT 8
+#define CB_ACTLR_BPRCOSH_SHIFT 28
+#define CB_ACTLR_BPRCISH_SHIFT 29
+#define CB_ACTLR_BPRCNSH_SHIFT 30
+
+/* Address Translation, Stage 1, Privileged Read: CB_ATS1PR */
+#define CB_ATS1PR_ADDR_SHIFT 12
+
+/* Address Translation, Stage 1, Privileged Write: CB_ATS1PW */
+#define CB_ATS1PW_ADDR_SHIFT 12
+
+/* Address Translation, Stage 1, User Read: CB_ATS1UR */
+#define CB_ATS1UR_ADDR_SHIFT 12
+
+/* Address Translation, Stage 1, User Write: CB_ATS1UW */
+#define CB_ATS1UW_ADDR_SHIFT 12
+
+/* Address Translation Status Register: CB_ATSR */
+#define CB_ATSR_ACTIVE_SHIFT 0
+
+/* Context ID Register: CB_CONTEXTIDR */
+#define CB_CONTEXTIDR_ASID_SHIFT 0
+#define CB_CONTEXTIDR_PROCID_SHIFT 8
+
+/* Fault Address Register: CB_FAR */
+#define CB_FAR_FADDR_SHIFT 0
+
+/* Fault Status Register: CB_FSR */
+#define CB_FSR_TF_SHIFT 1
+#define CB_FSR_AFF_SHIFT 2
+#define CB_FSR_PF_SHIFT 3
+#define CB_FSR_EF_SHIFT 4
+#define CB_FSR_TLBMCF_SHIFT 5
+#define CB_FSR_TLBLKF_SHIFT 6
+#define CB_FSR_SS_SHIFT 30
+#define CB_FSR_MULTI_SHIFT 31
+
+/* Fault Syndrome Register 0: CB_FSYNR0 */
+#define CB_FSYNR0_PLVL_SHIFT 0
+#define CB_FSYNR0_S1PTWF_SHIFT 3
+#define CB_FSYNR0_WNR_SHIFT 4
+#define CB_FSYNR0_PNU_SHIFT 5
+#define CB_FSYNR0_IND_SHIFT 6
+#define CB_FSYNR0_NSSTATE_SHIFT 7
+#define CB_FSYNR0_NSATTR_SHIFT 8
+#define CB_FSYNR0_ATOF_SHIFT 9
+#define CB_FSYNR0_PTWF_SHIFT 10
+#define CB_FSYNR0_AFR_SHIFT 11
+#define CB_FSYNR0_S1CBNDX_SHIFT 16
+
+/* Normal Memory Remap Register: CB_NMRR */
+#define CB_NMRR_IR0_SHIFT 0
+#define CB_NMRR_IR1_SHIFT 2
+#define CB_NMRR_IR2_SHIFT 4
+#define CB_NMRR_IR3_SHIFT 6
+#define CB_NMRR_IR4_SHIFT 8
+#define CB_NMRR_IR5_SHIFT 10
+#define CB_NMRR_IR6_SHIFT 12
+#define CB_NMRR_IR7_SHIFT 14
+#define CB_NMRR_OR0_SHIFT 16
+#define CB_NMRR_OR1_SHIFT 18
+#define CB_NMRR_OR2_SHIFT 20
+#define CB_NMRR_OR3_SHIFT 22
+#define CB_NMRR_OR4_SHIFT 24
+#define CB_NMRR_OR5_SHIFT 26
+#define CB_NMRR_OR6_SHIFT 28
+#define CB_NMRR_OR7_SHIFT 30
+
+/* Physical Address Register: CB_PAR */
+#define CB_PAR_F_SHIFT 0
+#define CB_PAR_SS_SHIFT 1
+#define CB_PAR_OUTER_SHIFT 2
+#define CB_PAR_INNER_SHIFT 4
+#define CB_PAR_SH_SHIFT 7
+#define CB_PAR_NS_SHIFT 9
+#define CB_PAR_NOS_SHIFT 10
+#define CB_PAR_PA_SHIFT 12
+#define CB_PAR_TF_SHIFT 1
+#define CB_PAR_AFF_SHIFT 2
+#define CB_PAR_PF_SHIFT 3
+#define CB_PAR_EF_SHIFT 4
+#define CB_PAR_TLBMCF_SHIFT 5
+#define CB_PAR_TLBLKF_SHIFT 6
+#define CB_PAR_ATOT_SHIFT 31
+#define CB_PAR_PLVL_SHIFT 32
+#define CB_PAR_STAGE_SHIFT 35
+
+/* Primary Region Remap Register: CB_PRRR */
+#define CB_PRRR_TR0_SHIFT 0
+#define CB_PRRR_TR1_SHIFT 2
+#define CB_PRRR_TR2_SHIFT 4
+#define CB_PRRR_TR3_SHIFT 6
+#define CB_PRRR_TR4_SHIFT 8
+#define CB_PRRR_TR5_SHIFT 10
+#define CB_PRRR_TR6_SHIFT 12
+#define CB_PRRR_TR7_SHIFT 14
+#define CB_PRRR_DS0_SHIFT 16
+#define CB_PRRR_DS1_SHIFT 17
+#define CB_PRRR_NS0_SHIFT 18
+#define CB_PRRR_NS1_SHIFT 19
+#define CB_PRRR_NOS0_SHIFT 24
+#define CB_PRRR_NOS1_SHIFT 25
+#define CB_PRRR_NOS2_SHIFT 26
+#define CB_PRRR_NOS3_SHIFT 27
+#define CB_PRRR_NOS4_SHIFT 28
+#define CB_PRRR_NOS5_SHIFT 29
+#define CB_PRRR_NOS6_SHIFT 30
+#define CB_PRRR_NOS7_SHIFT 31
+
+/* Transaction Resume: CB_RESUME */
+#define CB_RESUME_TNR_SHIFT 0
+
+/* System Control Register: CB_SCTLR */
+#define CB_SCTLR_M_SHIFT 0
+#define CB_SCTLR_TRE_SHIFT 1
+#define CB_SCTLR_AFE_SHIFT 2
+#define CB_SCTLR_AFFD_SHIFT 3
+#define CB_SCTLR_E_SHIFT 4
+#define CB_SCTLR_CFRE_SHIFT 5
+#define CB_SCTLR_CFIE_SHIFT 6
+#define CB_SCTLR_CFCFG_SHIFT 7
+#define CB_SCTLR_HUPCF_SHIFT 8
+#define CB_SCTLR_WXN_SHIFT 9
+#define CB_SCTLR_UWXN_SHIFT 10
+#define CB_SCTLR_ASIDPNE_SHIFT 12
+#define CB_SCTLR_TRANSIENTCFG_SHIFT 14
+#define CB_SCTLR_MEMATTR_SHIFT 16
+#define CB_SCTLR_MTCFG_SHIFT 20
+#define CB_SCTLR_SHCFG_SHIFT 22
+#define CB_SCTLR_RACFG_SHIFT 24
+#define CB_SCTLR_WACFG_SHIFT 26
+#define CB_SCTLR_NSCFG_SHIFT 28
+
+/* Invalidate TLB by ASID: CB_TLBIASID */
+#define CB_TLBIASID_ASID_SHIFT 0
+
+/* Invalidate TLB by VA: CB_TLBIVA */
+#define CB_TLBIVA_ASID_SHIFT 0
+#define CB_TLBIVA_VA_SHIFT 12
+
+/* Invalidate TLB by VA, All ASID: CB_TLBIVAA */
+#define CB_TLBIVAA_VA_SHIFT 12
+
+/* Invalidate TLB by VA, All ASID, Last Level: CB_TLBIVAAL */
+#define CB_TLBIVAAL_VA_SHIFT 12
+
+/* Invalidate TLB by VA, Last Level: CB_TLBIVAL */
+#define CB_TLBIVAL_ASID_SHIFT 0
+#define CB_TLBIVAL_VA_SHIFT 12
+
+/* TLB Status: CB_TLBSTATUS */
+#define CB_TLBSTATUS_SACTIVE_SHIFT 0
+
+/* Translation Table Base Control Register: CB_TTBCR */
+#define CB_TTBCR_T0SZ_SHIFT 0
+#define CB_TTBCR_T1SZ_SHIFT 16
+#define CB_TTBCR_EPD0_SHIFT 4
+#define CB_TTBCR_EPD1_SHIFT 5
+#define CB_TTBCR_NSCFG0_SHIFT 14
+#define CB_TTBCR_NSCFG1_SHIFT 30
+#define CB_TTBCR_EAE_SHIFT 31
+#define CB_TTBCR_IRGN0_SHIFT 8
+#define CB_TTBCR_IRGN1_SHIFT 24
+#define CB_TTBCR_ORGN0_SHIFT 10
+#define CB_TTBCR_ORGN1_SHIFT 26
+#define CB_TTBCR_A1_SHIFT 22
+#define CB_TTBCR_SH0_SHIFT 12
+#define CB_TTBCR_SH1_SHIFT 28
+
+/* Translation Table Base Register 0/1: CB_TTBR */
+#ifdef CONFIG_IOMMU_LPAE
+#define CB_TTBR0_ADDR_SHIFT 5
+#define CB_TTBR0_ASID_SHIFT 48
+#define CB_TTBR1_ASID_SHIFT 48
+#else
+#define CB_TTBR0_IRGN1_SHIFT 0
+#define CB_TTBR0_S_SHIFT 1
+#define CB_TTBR0_RGN_SHIFT 3
+#define CB_TTBR0_NOS_SHIFT 5
+#define CB_TTBR0_IRGN0_SHIFT 6
+#define CB_TTBR0_ADDR_SHIFT 14
+
+#define CB_TTBR1_IRGN1_SHIFT 0
+#define CB_TTBR1_S_SHIFT 1
+#define CB_TTBR1_RGN_SHIFT 3
+#define CB_TTBR1_NOS_SHIFT 5
+#define CB_TTBR1_IRGN0_SHIFT 6
+#define CB_TTBR1_ADDR_SHIFT 14
+#endif
+
+#endif
diff --git a/drivers/iommu/qcom/msm_iommu_pagetable.c b/drivers/iommu/qcom/msm_iommu_pagetable.c
new file mode 100644
index 0000000000000..1f11abb9db7bf
--- /dev/null
+++ b/drivers/iommu/qcom/msm_iommu_pagetable.c
@@ -0,0 +1,645 @@
+/* Copyright (c) 2012-2014, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/errno.h>
+#include <linux/io.h>
+#include <linux/iommu.h>
+#include <linux/scatterlist.h>
+
+#include <asm/cacheflush.h>
+
+#include <linux/qcom_iommu.h>
+#include "msm_iommu_priv.h"
+#include <trace/events/kmem.h>
+#include "msm_iommu_pagetable.h"
+
+#define NUM_FL_PTE 4096
+#define NUM_SL_PTE 256
+#define GUARD_PTE 2
+#define NUM_TEX_CLASS 8
+
+/* First-level page table bits */
+#define FL_BASE_MASK 0xFFFFFC00
+#define FL_TYPE_TABLE (1 << 0)
+#define FL_TYPE_SECT (2 << 0)
+#define FL_SUPERSECTION (1 << 18)
+#define FL_AP0 (1 << 10)
+#define FL_AP1 (1 << 11)
+#define FL_AP2 (1 << 15)
+#define FL_SHARED (1 << 16)
+#define FL_BUFFERABLE (1 << 2)
+#define FL_CACHEABLE (1 << 3)
+#define FL_TEX0 (1 << 12)
+#define FL_OFFSET(va) (((va) & 0xFFF00000) >> 20)
+#define FL_NG (1 << 17)
+
+/* Second-level page table bits */
+#define SL_BASE_MASK_LARGE 0xFFFF0000
+#define SL_BASE_MASK_SMALL 0xFFFFF000
+#define SL_TYPE_LARGE (1 << 0)
+#define SL_TYPE_SMALL (2 << 0)
+#define SL_AP0 (1 << 4)
+#define SL_AP1 (2 << 4)
+#define SL_AP2 (1 << 9)
+#define SL_SHARED (1 << 10)
+#define SL_BUFFERABLE (1 << 2)
+#define SL_CACHEABLE (1 << 3)
+#define SL_TEX0 (1 << 6)
+#define SL_OFFSET(va) (((va) & 0xFF000) >> 12)
+#define SL_NG (1 << 11)
+
+/* Memory type and cache policy attributes */
+#define MT_SO 0
+#define MT_DEV 1
+#define MT_IOMMU_NORMAL 2
+#define CP_NONCACHED 0
+#define CP_WB_WA 1
+#define CP_WT 2
+#define CP_WB_NWA 3
+
+/* Sharability attributes of MSM IOMMU mappings */
+#define MSM_IOMMU_ATTR_NON_SH 0x0
+#define MSM_IOMMU_ATTR_SH 0x4
+
+/* Cacheability attributes of MSM IOMMU mappings */
+#define MSM_IOMMU_ATTR_NONCACHED 0x0
+#define MSM_IOMMU_ATTR_CACHED_WB_WA 0x1
+#define MSM_IOMMU_ATTR_CACHED_WB_NWA 0x2
+#define MSM_IOMMU_ATTR_CACHED_WT 0x3
+
+static int msm_iommu_tex_class[4];
+
+/* TEX Remap Registers */
+#define NMRR_ICP(nmrr, n) (((nmrr) & (3 << ((n) * 2))) >> ((n) * 2))
+#define NMRR_OCP(nmrr, n) (((nmrr) & (3 << ((n) * 2 + 16))) >> ((n) * 2 + 16))
+
+#define PRRR_NOS(prrr, n) ((prrr) & (1 << ((n) + 24)) ? 1 : 0)
+#define PRRR_MT(prrr, n) ((((prrr) & (3 << ((n) * 2))) >> ((n) * 2)))
+
+/*
+ * Flush a range of page-table entries to memory so the IOMMU's table
+ * walker observes them.  No-op when @redirect is set (per msm_iommu_pt,
+ * redirect means L2-cache redirection of table walks is enabled, so the
+ * walker snoops the cache and no explicit flush is required).
+ */
+static inline void clean_pte(u32 *start, u32 *end, int redirect)
+{
+	if (!redirect)
+		dmac_flush_range(start, end);
+}
+
+/*
+ * Allocate the 16K first-level page table (NUM_FL_PTE u32 entries) and a
+ * same-sized shadow table, zero both, and flush the live table so the
+ * IOMMU walker starts from a clean state.  The shadow is CPU-only
+ * bookkeeping and is not flushed.
+ *
+ * Returns 0 on success, -ENOMEM on allocation failure (no partial state
+ * is left behind).
+ */
+int msm_iommu_pagetable_alloc(struct msm_iommu_pt *pt)
+{
+	pt->fl_table = (u32 *)__get_free_pages(GFP_KERNEL, get_order(SZ_16K));
+	if (!pt->fl_table)
+		return -ENOMEM;
+
+	pt->fl_table_shadow = (u32 *)__get_free_pages(GFP_KERNEL,
+						      get_order(SZ_16K));
+	if (!pt->fl_table_shadow) {
+		free_pages((unsigned long)pt->fl_table, get_order(SZ_16K));
+		return -ENOMEM;
+	}
+
+	memset(pt->fl_table, 0, SZ_16K);
+	memset(pt->fl_table_shadow, 0, SZ_16K);
+	clean_pte(pt->fl_table, pt->fl_table + NUM_FL_PTE, pt->redirect);
+
+	return 0;
+}
+
+/*
+ * Free the first-level table, every second-level table it still
+ * references, and the shadow table.  The caller must guarantee no
+ * hardware context is still walking these tables.
+ */
+void msm_iommu_pagetable_free(struct msm_iommu_pt *pt)
+{
+	u32 *fl_table;
+	u32 *fl_table_shadow;
+	int i;
+
+	fl_table = pt->fl_table;
+	fl_table_shadow = pt->fl_table_shadow;
+
+	/* A first-level descriptor of type TABLE owns one 4K SL page. */
+	for (i = 0; i < NUM_FL_PTE; i++)
+		if ((fl_table[i] & 0x03) == FL_TYPE_TABLE)
+			free_page((unsigned long) __va(((fl_table[i]) &
+							FL_BASE_MASK)));
+	free_pages((unsigned long)fl_table, get_order(SZ_16K));
+	/* Use NULL, not 0, for pointer members (sparse: plain integer as
+	 * NULL pointer); also guards later accidental reuse. */
+	pt->fl_table = NULL;
+
+	free_pages((unsigned long)fl_table_shadow, get_order(SZ_16K));
+	pt->fl_table_shadow = NULL;
+}
+
+/*
+ * Free any second-level tables that became completely unused after an
+ * unmap of [@va, @va + @len).  The shadow entry encodes the SL table
+ * address in the upper bits and a use count in the low 9 bits; a
+ * non-zero shadow with a zero count means the table exists but maps
+ * nothing, so its page can be released.
+ */
+void msm_iommu_pagetable_free_tables(struct msm_iommu_pt *pt, unsigned long va,
+				size_t len)
+{
+	/*
+	 * Adding 2 for worst case. We could be spanning 3 second level pages
+	 * if we unmapped just over 1MB.
+	 */
+	u32 n_entries = len / SZ_1M + 2;
+	u32 fl_offset = FL_OFFSET(va);
+	u32 i;
+
+	for (i = 0; i < n_entries && fl_offset < NUM_FL_PTE; ++i) {
+		u32 *fl_pte_shadow = pt->fl_table_shadow + fl_offset;
+		void *sl_table_va = __va(((*fl_pte_shadow) & ~0x1FF));
+		u32 sl_table = *fl_pte_shadow;
+
+		/* Non-zero address, zero use count: table is empty. */
+		if (sl_table && !(sl_table & 0x1FF)) {
+			free_pages((unsigned long) sl_table_va,
+				   get_order(SZ_4K));
+			*fl_pte_shadow = 0;
+		}
+		++fl_offset;
+	}
+}
+
+/*
+ * Translate IOMMU_* protection flags into ARM short-descriptor attribute
+ * bits for a mapping of size @len: FL_* section bits for 1M/16M, SL_*
+ * page bits otherwise.
+ *
+ * Returns the attribute bits, or 0 if the selected TEX remap class is
+ * invalid (callers treat 0 as an error).  Missing R/W attributes and
+ * write-only requests are coerced to RW with a one-time warning rather
+ * than rejected.
+ */
+static int __get_pgprot(int prot, int len)
+{
+	unsigned int pgprot;
+	int tex;
+
+	if (!(prot & (IOMMU_READ | IOMMU_WRITE))) {
+		prot |= IOMMU_READ | IOMMU_WRITE;
+		WARN_ONCE(1, "No attributes in iommu mapping; assuming RW\n");
+	}
+
+	if ((prot & IOMMU_WRITE) && !(prot & IOMMU_READ)) {
+		prot |= IOMMU_READ;
+		WARN_ONCE(1, "Write-only unsupported; falling back to RW\n");
+	}
+
+	/* Cached mappings reuse the kernel's own TEX index from
+	 * PAGE_KERNEL; uncached ones use the precomputed class, which may
+	 * be -ENODEV if setup_iommu_tex_classes() found no match. */
+	if (prot & IOMMU_CACHE)
+		tex = (pgprot_val(PAGE_KERNEL) >> 2) & 0x07;
+	else
+		tex = msm_iommu_tex_class[MSM_IOMMU_ATTR_NONCACHED];
+
+	if (tex < 0 || tex > NUM_TEX_CLASS - 1)
+		return 0;
+
+	if (len == SZ_16M || len == SZ_1M) {
+		pgprot = FL_SHARED;
+		pgprot |= tex & 0x01 ? FL_BUFFERABLE : 0;
+		pgprot |= tex & 0x02 ? FL_CACHEABLE : 0;
+		pgprot |= tex & 0x04 ? FL_TEX0 : 0;
+		pgprot |= prot & IOMMU_PRIV ? FL_AP0 :
+			(FL_AP0 | FL_AP1);
+		pgprot |= prot & IOMMU_WRITE ? 0 : FL_AP2;
+	} else {
+		pgprot = SL_SHARED;
+		pgprot |= tex & 0x01 ? SL_BUFFERABLE : 0;
+		pgprot |= tex & 0x02 ? SL_CACHEABLE : 0;
+		pgprot |= tex & 0x04 ? SL_TEX0 : 0;
+		pgprot |= prot & IOMMU_PRIV ? SL_AP0 :
+			(SL_AP0 | SL_AP1);
+		pgprot |= prot & IOMMU_WRITE ? 0 : SL_AP2;
+	}
+
+	return pgprot;
+}
+
+/*
+ * Allocate and install a second-level page table behind @fl_pte.
+ * The new table is zeroed and flushed (including the guard entries),
+ * the first-level entry is pointed at it, and the shadow entry records
+ * the table address with the low 9 use-count bits cleared.
+ *
+ * Returns the table's virtual address, or NULL on allocation failure.
+ * NOTE(review): the (int)__pa() cast assumes the table's physical
+ * address fits in 32 bits — holds for this non-LPAE format, but confirm.
+ */
+static u32 *make_second_level(struct msm_iommu_pt *pt, u32 *fl_pte,
+			u32 *fl_pte_shadow)
+{
+	u32 *sl;
+	sl = (u32 *) __get_free_pages(GFP_KERNEL,
+			get_order(SZ_4K));
+
+	if (!sl) {
+		pr_debug("Could not allocate second level table\n");
+		goto fail;
+	}
+	memset(sl, 0, SZ_4K);
+	clean_pte(sl, sl + NUM_SL_PTE + GUARD_PTE, pt->redirect);
+
+	*fl_pte = ((((int)__pa(sl)) & FL_BASE_MASK) | \
+			FL_TYPE_TABLE);
+	*fl_pte_shadow = *fl_pte & ~0x1FF;
+
+	clean_pte(fl_pte, fl_pte + 1, pt->redirect);
+fail:
+	return sl;
+}
+
+/* Install one 4K second-level PTE; -EBUSY if the slot is occupied. */
+static int sl_4k(u32 *sl_pte, phys_addr_t pa, unsigned int pgprot)
+{
+	if (*sl_pte)
+		return -EBUSY;
+
+	*sl_pte = (pa & SL_BASE_MASK_SMALL) | SL_NG | SL_SHARED
+		| SL_TYPE_SMALL | pgprot;
+	return 0;
+}
+
+/*
+ * Install a 64K large page, which occupies 16 consecutive second-level
+ * slots.  All 16 must be free beforehand; -EBUSY if any is in use.
+ */
+static int sl_64k(u32 *sl_pte, phys_addr_t pa, unsigned int pgprot)
+{
+	int idx;
+
+	for (idx = 0; idx < 16; idx++)
+		if (sl_pte[idx])
+			return -EBUSY;
+
+	for (idx = 0; idx < 16; idx++)
+		sl_pte[idx] = (pa & SL_BASE_MASK_LARGE) | SL_NG
+			| SL_SHARED | SL_TYPE_LARGE | pgprot;
+
+	return 0;
+}
+
+/* Install a 1M first-level section entry; -EBUSY if already mapped. */
+static inline int fl_1m(u32 *fl_pte, phys_addr_t pa, int pgprot)
+{
+	if (*fl_pte != 0)
+		return -EBUSY;
+
+	*fl_pte = FL_NG | FL_TYPE_SECT | FL_SHARED
+		| (pa & 0xFFF00000) | pgprot;
+
+	return 0;
+}
+
+/*
+ * Install a 16M supersection, which spans 16 consecutive first-level
+ * slots.  All 16 must be free beforehand; -EBUSY if any is in use.
+ */
+static inline int fl_16m(u32 *fl_pte, phys_addr_t pa, int pgprot)
+{
+	int idx;
+
+	for (idx = 0; idx < 16; idx++)
+		if (fl_pte[idx])
+			return -EBUSY;
+
+	for (idx = 0; idx < 16; idx++)
+		fl_pte[idx] = (pa & 0xFF000000) | FL_SUPERSECTION
+			| FL_TYPE_SECT | FL_SHARED | FL_NG | pgprot;
+
+	return 0;
+}
+
+/*
+ * Map one physically contiguous region whose length is exactly one
+ * supported page size (4K/64K/1M/16M), by wrapping it in a single-entry
+ * scatterlist and delegating to msm_iommu_pagetable_map_range().
+ *
+ * Returns 0 on success, -EINVAL for an unsupported size, or whatever
+ * msm_iommu_pagetable_map_range() returns.
+ */
+int msm_iommu_pagetable_map(struct msm_iommu_pt *pt, unsigned long va,
+			phys_addr_t pa, size_t len, int prot)
+{
+	int ret;
+	struct scatterlist sg;
+
+	if (len != SZ_16M && len != SZ_1M &&
+	    len != SZ_64K && len != SZ_4K) {
+		/* len is size_t (unsigned): %zu, not %zd (ssize_t). */
+		pr_debug("Bad size: %zu\n", len);
+		ret = -EINVAL;
+		goto fail;
+	}
+
+	sg_init_table(&sg, 1);
+	sg_dma_address(&sg) = pa;
+	sg.length = len;
+
+	ret = msm_iommu_pagetable_map_range(pt, va, &sg, len, prot);
+
+fail:
+	return ret;
+}
+
+/*
+ * Unmap @len bytes at @va.  Always returns @len, per the iommu unmap
+ * contract.  Only clears entries; empty second-level tables are freed
+ * separately via msm_iommu_pagetable_free_tables().
+ */
+size_t msm_iommu_pagetable_unmap(struct msm_iommu_pt *pt, unsigned long va,
+				size_t len)
+{
+	msm_iommu_pagetable_unmap_range(pt, va, len);
+	return len;
+}
+
+/*
+ * Physical address of a scatterlist entry.  Prefer sg_dma_address so
+ * carveout regions with no backing struct page can be mapped; fall back
+ * to sg_phys when no DMA address was set.
+ */
+static phys_addr_t get_phys_addr(struct scatterlist *sg)
+{
+	phys_addr_t addr = sg_dma_address(sg);
+
+	return addr ? addr : sg_phys(sg);
+}
+
+/*
+ * Can a chunk of @len bytes at (va, pa) be mapped with a page of size
+ * @align?  True when both addresses are @align-aligned and at least
+ * @align bytes remain.  With CONFIG_IOMMU_FORCE_4K_MAPPINGS (a debug
+ * aid) every size other than 4K is rejected, forcing 4K-only mappings.
+ */
+#ifdef CONFIG_IOMMU_FORCE_4K_MAPPINGS
+static inline int is_fully_aligned(unsigned int va, phys_addr_t pa, size_t len,
+				   int align)
+{
+	if (align == SZ_4K) {
+		return  IS_ALIGNED(va, align) && IS_ALIGNED(pa, align)
+			&& (len >= align);
+	} else {
+		return 0;
+	}
+}
+#else
+static inline int is_fully_aligned(unsigned int va, phys_addr_t pa, size_t len,
+				   int align)
+{
+	return  IS_ALIGNED(va, align) && IS_ALIGNED(pa, align)
+		&& (len >= align);
+}
+#endif
+
+/*
+ * Map a scatterlist into the page table starting at @va.
+ *
+ * Walks the scatterlist chunk by chunk, selecting the largest page size
+ * (16M, 1M, 64K, 4K) for which va, pa and the remaining chunk are all
+ * aligned.  16M/1M use first-level section entries; 64K/4K go through a
+ * second-level table, allocated on demand.  The shadow table's low
+ * 9 bits count live entries per second-level table.  On failure every
+ * entry installed so far is unmapped again, so the operation is
+ * all-or-nothing.  @len must be a multiple of 4K.
+ *
+ * Returns 0 on success or a negative errno.
+ */
+int msm_iommu_pagetable_map_range(struct msm_iommu_pt *pt, unsigned int va,
+		       struct scatterlist *sg, unsigned int len, int prot)
+{
+	phys_addr_t pa;
+	unsigned int start_va = va;
+	unsigned int offset = 0;
+	u32 *fl_pte;
+	u32 *fl_pte_shadow;
+	u32 fl_offset;
+	u32 *sl_table = NULL;
+	u32 sl_offset, sl_start;
+	unsigned int chunk_size, chunk_offset = 0;
+	int ret = 0;
+	unsigned int pgprot4k, pgprot64k, pgprot1m, pgprot16m;
+
+	BUG_ON(len & (SZ_4K - 1));
+
+	/* Precompute attribute bits per size; 0 means invalid TEX class. */
+	pgprot4k = __get_pgprot(prot, SZ_4K);
+	pgprot64k = __get_pgprot(prot, SZ_64K);
+	pgprot1m = __get_pgprot(prot, SZ_1M);
+	pgprot16m = __get_pgprot(prot, SZ_16M);
+	if (!pgprot4k || !pgprot64k || !pgprot1m || !pgprot16m) {
+		ret = -EINVAL;
+		goto fail;
+	}
+
+	fl_offset = FL_OFFSET(va);	/* Upper 12 bits */
+	fl_pte = pt->fl_table + fl_offset;	/* int pointers, 4 bytes */
+	fl_pte_shadow = pt->fl_table_shadow + fl_offset;
+	pa = get_phys_addr(sg);
+
+	while (offset < len) {
+		chunk_size = SZ_4K;
+
+		if (is_fully_aligned(va, pa, sg->length - chunk_offset,
+				     SZ_16M))
+			chunk_size = SZ_16M;
+		else if (is_fully_aligned(va, pa, sg->length - chunk_offset,
+					  SZ_1M))
+			chunk_size = SZ_1M;
+		/* 64k or 4k determined later */
+
+//		trace_iommu_map_range(va, pa, sg->length, chunk_size);
+
+		/* for 1M and 16M, only first level entries are required */
+		if (chunk_size >= SZ_1M) {
+			if (chunk_size == SZ_16M) {
+				ret = fl_16m(fl_pte, pa, pgprot16m);
+				if (ret)
+					goto fail;
+				clean_pte(fl_pte, fl_pte + 16, pt->redirect);
+				fl_pte += 16;
+				fl_pte_shadow += 16;
+			} else if (chunk_size == SZ_1M) {
+				ret = fl_1m(fl_pte, pa, pgprot1m);
+				if (ret)
+					goto fail;
+				clean_pte(fl_pte, fl_pte + 1, pt->redirect);
+				fl_pte++;
+				fl_pte_shadow++;
+			}
+
+			offset += chunk_size;
+			chunk_offset += chunk_size;
+			va += chunk_size;
+			pa += chunk_size;
+
+			/* Current sg entry exhausted: advance to the next. */
+			if (chunk_offset >= sg->length && offset < len) {
+				chunk_offset = 0;
+				sg = sg_next(sg);
+				pa = get_phys_addr(sg);
+			}
+			continue;
+		}
+		/* for 4K or 64K, make sure there is a second level table */
+		if (*fl_pte == 0) {
+			if (!make_second_level(pt, fl_pte, fl_pte_shadow)) {
+				ret = -ENOMEM;
+				goto fail;
+			}
+		}
+		if (!(*fl_pte & FL_TYPE_TABLE)) {
+			ret = -EBUSY;
+			goto fail;
+		}
+		sl_table = __va(((*fl_pte) & FL_BASE_MASK));
+		sl_offset = SL_OFFSET(va);
+		/* Keep track of initial position so we
+		 * don't clean more than we have to
+		 */
+		sl_start = sl_offset;
+
+		/* Build the 2nd level page table */
+		while (offset < len && sl_offset < NUM_SL_PTE) {
+			/* Map a large 64K page if the chunk is large enough and
+			 * the pa and va are aligned
+			 */
+
+			if (is_fully_aligned(va, pa, sg->length - chunk_offset,
+					     SZ_64K))
+				chunk_size = SZ_64K;
+			else
+				chunk_size = SZ_4K;
+
+//			trace_iommu_map_range(va, pa, sg->length,
+//					      chunk_size);
+
+			if (chunk_size == SZ_4K) {
+				sl_4k(&sl_table[sl_offset], pa, pgprot4k);
+				sl_offset++;
+				/* Increment map count */
+				(*fl_pte_shadow)++;
+			} else {
+				BUG_ON(sl_offset + 16 > NUM_SL_PTE);
+				sl_64k(&sl_table[sl_offset], pa, pgprot64k);
+				sl_offset += 16;
+				/* Increment map count */
+				*fl_pte_shadow += 16;
+			}
+
+			offset += chunk_size;
+			chunk_offset += chunk_size;
+			va += chunk_size;
+			pa += chunk_size;
+
+			if (chunk_offset >= sg->length && offset < len) {
+				chunk_offset = 0;
+				sg = sg_next(sg);
+				pa = get_phys_addr(sg);
+			}
+		}
+
+		/* Flush only the SL entries written in this pass. */
+		clean_pte(sl_table + sl_start, sl_table + sl_offset,
+			  pt->redirect);
+		fl_pte++;
+		fl_pte_shadow++;
+		sl_offset = 0;
+	}
+
+fail:
+	/* Roll back any partial mapping so the table stays consistent. */
+	if (ret && offset > 0)
+		msm_iommu_pagetable_unmap_range(pt, start_va, offset);
+
+	return ret;
+}
+
+/*
+ * Clear all page-table entries covering [@va, @va + @len).  @len must be
+ * a multiple of 4K.  Second-level use counts in the shadow table are
+ * decremented; when a count reaches zero the first-level entry is
+ * cleared too, but the second-level page itself is only released later
+ * by msm_iommu_pagetable_free_tables().
+ */
+void msm_iommu_pagetable_unmap_range(struct msm_iommu_pt *pt, unsigned int va,
+				 unsigned int len)
+{
+	unsigned int offset = 0;
+	u32 *fl_pte;
+	u32 *fl_pte_shadow;
+	u32 fl_offset;
+	u32 *sl_table;
+	u32 sl_start, sl_end;
+	int used;
+
+	BUG_ON(len & (SZ_4K - 1));
+
+	fl_offset = FL_OFFSET(va);	/* Upper 12 bits */
+	fl_pte = pt->fl_table + fl_offset;	/* int pointers, 4 bytes */
+	fl_pte_shadow = pt->fl_table_shadow + fl_offset;
+
+	while (offset < len) {
+		if (*fl_pte & FL_TYPE_TABLE) {
+			/* Second-level mapping: clear the covered SL span. */
+			unsigned int n_entries;
+
+			sl_start = SL_OFFSET(va);
+			sl_table = __va(((*fl_pte) & FL_BASE_MASK));
+			sl_end = ((len - offset) / SZ_4K) + sl_start;
+
+			if (sl_end > NUM_SL_PTE)
+				sl_end = NUM_SL_PTE;
+			n_entries = sl_end - sl_start;
+
+			memset(sl_table + sl_start, 0, n_entries * 4);
+			clean_pte(sl_table + sl_start, sl_table + sl_end,
+				  pt->redirect);
+
+			offset += n_entries * SZ_4K;
+			va += n_entries * SZ_4K;
+
+			/* Shadow low 9 bits hold the live-entry count. */
+			BUG_ON((*fl_pte_shadow & 0x1FF) < n_entries);
+
+			/* Decrement map count */
+			*fl_pte_shadow -= n_entries;
+			used = *fl_pte_shadow & 0x1FF;
+
+			if (!used) {
+				*fl_pte = 0;
+				clean_pte(fl_pte, fl_pte + 1, pt->redirect);
+			}
+
+			sl_start = 0;
+		} else {
+			/* Section/supersection: one FL entry covers 1M. */
+			*fl_pte = 0;
+			*fl_pte_shadow = 0;
+
+			clean_pte(fl_pte, fl_pte + 1, pt->redirect);
+			va += SZ_1M;
+			offset += SZ_1M;
+			sl_start = 0;
+		}
+		fl_pte++;
+		fl_pte_shadow++;
+	}
+}
+
+/*
+ * Software walk of the page table: translate IOVA @va to a physical
+ * address by decoding the first-level entry and, for TABLE descriptors,
+ * the second-level entry.  Handles 16M supersections, 1M sections, 64K
+ * large pages and 4K small pages.  Returns 0 if @va is unmapped or the
+ * table does not exist.
+ * NOTE(review): @va is typed phys_addr_t but is used as an IOVA here.
+ */
+phys_addr_t msm_iommu_iova_to_phys_soft(struct iommu_domain *domain,
+					phys_addr_t va)
+{
+	struct msm_iommu_priv *priv = to_msm_priv(domain);
+	struct msm_iommu_pt *pt = &priv->pt;
+	u32 *fl_pte;
+	u32 fl_offset;
+	u32 *sl_table = NULL;
+	u32 sl_offset;
+	u32 *sl_pte;
+
+	if (!pt->fl_table) {
+		pr_err("Page table doesn't exist\n");
+		return 0;
+	}
+
+	fl_offset = FL_OFFSET(va);
+	fl_pte = pt->fl_table + fl_offset;
+
+	if (*fl_pte & FL_TYPE_TABLE) {
+		sl_table = __va(((*fl_pte) & FL_BASE_MASK));
+		sl_offset = SL_OFFSET(va);
+		sl_pte = sl_table + sl_offset;
+		/* 64 KB section */
+		if (*sl_pte & SL_TYPE_LARGE)
+			return (*sl_pte & 0xFFFF0000) | (va & ~0xFFFF0000);
+		/* 4 KB section */
+		if (*sl_pte & SL_TYPE_SMALL)
+			return (*sl_pte & 0xFFFFF000) | (va & ~0xFFFFF000);
+	} else {
+		/* 16 MB section */
+		if (*fl_pte & FL_SUPERSECTION)
+			return (*fl_pte & 0xFF000000) | (va & ~0xFF000000);
+		/* 1 MB section */
+		if (*fl_pte & FL_TYPE_SECT)
+			return (*fl_pte & 0xFFF00000) | (va & ~0xFFF00000);
+	}
+	return 0;
+}
+
+/*
+ * Find the TEX remap class whose inner cache policy (@icp), outer cache
+ * policy (@ocp), memory type (@mt) and outer-shareability bit (@nos)
+ * match the current PRRR/NMRR configuration.
+ *
+ * Returns the matching class index [0, NUM_TEX_CLASS), or -ENODEV if no
+ * class matches (__get_pgprot() rejects negative classes later).
+ */
+static int __init get_tex_class(int icp, int ocp, int mt, int nos)
+{
+	int i = 0;
+	unsigned int prrr;
+	unsigned int nmrr;
+	int c_icp, c_ocp, c_mt, c_nos;
+
+	prrr = msm_iommu_get_prrr();
+	nmrr = msm_iommu_get_nmrr();
+
+	for (i = 0; i < NUM_TEX_CLASS; i++) {
+		c_nos = PRRR_NOS(prrr, i);
+		c_mt = PRRR_MT(prrr, i);
+		c_icp = NMRR_ICP(nmrr, i);
+		c_ocp = NMRR_OCP(nmrr, i);
+
+		if (icp == c_icp && ocp == c_ocp && c_mt == mt && c_nos == nos)
+			return i;
+	}
+
+	return -ENODEV;
+}
+
+/*
+ * Resolve each MSM_IOMMU_ATTR_* cacheability attribute to a TEX remap
+ * class index.  An unmatched attribute stores -ENODEV in the table,
+ * which __get_pgprot() treats as invalid at mapping time.
+ */
+static void __init setup_iommu_tex_classes(void)
+{
+	msm_iommu_tex_class[MSM_IOMMU_ATTR_NONCACHED] =
+			get_tex_class(CP_NONCACHED, CP_NONCACHED,
+				      MT_IOMMU_NORMAL, 1);
+
+	msm_iommu_tex_class[MSM_IOMMU_ATTR_CACHED_WB_WA] =
+			get_tex_class(CP_WB_WA, CP_WB_WA, MT_IOMMU_NORMAL, 1);
+
+	msm_iommu_tex_class[MSM_IOMMU_ATTR_CACHED_WB_NWA] =
+			get_tex_class(CP_WB_NWA, CP_WB_NWA, MT_IOMMU_NORMAL, 1);
+
+	msm_iommu_tex_class[MSM_IOMMU_ATTR_CACHED_WT] =
+			get_tex_class(CP_WT, CP_WT, MT_IOMMU_NORMAL, 1);
+}
+
+/* One-time init: populate the TEX class lookup table used by mappings. */
+void __init msm_iommu_pagetable_init(void)
+{
+	setup_iommu_tex_classes();
+}
diff --git a/drivers/iommu/qcom/msm_iommu_pagetable.h b/drivers/iommu/qcom/msm_iommu_pagetable.h
new file mode 100644
index 0000000000000..12a8d274f95e7
--- /dev/null
+++ b/drivers/iommu/qcom/msm_iommu_pagetable.h
@@ -0,0 +1,33 @@
+/* Copyright (c) 2012-2014, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef __ARCH_ARM_MACH_MSM_IOMMU_PAGETABLE_H
+#define __ARCH_ARM_MACH_MSM_IOMMU_PAGETABLE_H
+
+struct msm_iommu_pt;
+
+void msm_iommu_pagetable_init(void);
+int msm_iommu_pagetable_alloc(struct msm_iommu_pt *pt);
+void msm_iommu_pagetable_free(struct msm_iommu_pt *pt);
+int msm_iommu_pagetable_map(struct msm_iommu_pt *pt, unsigned long va,
+ phys_addr_t pa, size_t len, int prot);
+size_t msm_iommu_pagetable_unmap(struct msm_iommu_pt *pt, unsigned long va,
+ size_t len);
+int msm_iommu_pagetable_map_range(struct msm_iommu_pt *pt, unsigned int va,
+ struct scatterlist *sg, unsigned int len, int prot);
+void msm_iommu_pagetable_unmap_range(struct msm_iommu_pt *pt, unsigned int va,
+ unsigned int len);
+phys_addr_t msm_iommu_iova_to_phys_soft(struct iommu_domain *domain,
+ phys_addr_t va);
+void msm_iommu_pagetable_free_tables(struct msm_iommu_pt *pt, unsigned long va,
+ size_t len);
+#endif
diff --git a/drivers/iommu/qcom/msm_iommu_perfmon.h b/drivers/iommu/qcom/msm_iommu_perfmon.h
new file mode 100644
index 0000000000000..45683f4ebd886
--- /dev/null
+++ b/drivers/iommu/qcom/msm_iommu_perfmon.h
@@ -0,0 +1,233 @@
+/* Copyright (c) 2012-2013, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+#include <linux/err.h>
+#include <linux/mutex.h>
+#include <linux/list.h>
+#include <linux/irqreturn.h>
+
+#ifndef MSM_IOMMU_PERFMON_H
+#define MSM_IOMMU_PERFMON_H
+
+/**
+ * struct iommu_pmon_counter - container for a performance counter.
+ * @counter_no: counter number within the group
+ * @absolute_counter_no: counter number within IOMMU PMU
+ * @value: cached counter value
+ * @overflow_count: no of times counter has overflowed
+ * @enabled: indicates whether counter is enabled or not
+ * @current_event_class: current selected event class, -1 if none
+ * @counter_dir: debugfs directory for this counter
+ * @cnt_group: group this counter belongs to
+ */
+struct iommu_pmon_counter {
+ unsigned int counter_no;
+ unsigned int absolute_counter_no;
+ unsigned long value;
+ unsigned long overflow_count;
+ unsigned int enabled;
+ int current_event_class;
+ struct dentry *counter_dir;
+ struct iommu_pmon_cnt_group *cnt_group;
+};
+
+/**
+ * struct iommu_pmon_cnt_group - container for a perf mon counter group.
+ * @grp_no: group number
+ * @num_counters: number of counters in this group
+ * @counters: list of counter in this group
+ * @group_dir: debugfs directory for this group
+ * @pmon: pointer to the iommu_pmon object this group belongs to
+ */
+struct iommu_pmon_cnt_group {
+ unsigned int grp_no;
+ unsigned int num_counters;
+ struct iommu_pmon_counter *counters;
+ struct dentry *group_dir;
+ struct iommu_pmon *pmon;
+};
+
+/**
+ * struct iommu_info - container for a perf mon iommu info.
+ * @iommu_name: name of the iommu from device tree
+ * @base: virtual base address for this iommu
+ * @evt_irq: irq number for event overflow interrupt
+ * @iommu_dev: pointer to iommu device
+ * @ops: iommu access operations pointer.
+ * @hw_ops: iommu pm hw access operations pointer.
+ * @always_on: 1 if iommu is always on, 0 otherwise.
+ */
+struct iommu_info {
+ const char *iommu_name;
+ void *base;
+ int evt_irq;
+ struct device *iommu_dev;
+ struct iommu_access_ops *ops;
+ struct iommu_pm_hw_ops *hw_ops;
+ unsigned int always_on;
+};
+
+/**
+ * struct iommu_pmon - main container for a perf mon data.
+ * @iommu_dir: debugfs directory for this iommu
+ * @iommu: iommu_info instance
+ * @iommu_list: iommu_list head
+ * @cnt_grp: list of counter groups
+ * @num_groups: number of counter groups
+ * @num_counters: number of counters per group
+ * @event_cls_supported: an array of event classes supported for this PMU
+ * @nevent_cls_supported: number of event classes supported.
+ * @enabled: Indicates whether perf. mon is enabled or not
+ * @iommu_attach_count: Number of contexts currently attached to this IOMMU.
+ * @lock: mutex used to synchronize access to shared data
+ */
+struct iommu_pmon {
+ struct dentry *iommu_dir;
+ struct iommu_info iommu;
+ struct list_head iommu_list;
+ struct iommu_pmon_cnt_group *cnt_grp;
+ u32 num_groups;
+ u32 num_counters;
+ u32 *event_cls_supported;
+ u32 nevent_cls_supported;
+ unsigned int enabled;
+ unsigned int iommu_attach_count;
+ struct mutex lock;
+};
+
+/**
+ * struct iommu_hw_ops - Callbacks for accessing IOMMU HW
+ * @initialize_hw: Call to do any initialization before enabling ovf interrupts
+ * @is_hw_access_OK: Returns 1 if we can access HW, 0 otherwise
+ * @grp_enable: Call to enable a counter group
+ * @grp_disable: Call to disable a counter group
+ * @enable_pm: Call to enable PM
+ * @disable_pm: Call to disable PM
+ * @reset_counters: Call to reset counters
+ * @check_for_overflow: Call to check for overflow
+ * @evt_ovfl_int_handler: Overflow interrupt handler callback
+ * @counter_enable: Call to enable counters
+ * @counter_disable: Call to disable counters
+ * @ovfl_int_enable: Call to enable overflow interrupts
+ * @ovfl_int_disable: Call to disable overflow interrupts
+ * @set_event_class: Call to set event class
+ * @read_counter: Call to read a counter value
+ */
+struct iommu_pm_hw_ops {
+ void (*initialize_hw)(const struct iommu_pmon *);
+ unsigned int (*is_hw_access_OK)(const struct iommu_pmon *);
+ void (*grp_enable)(struct iommu_info *, unsigned int);
+ void (*grp_disable)(struct iommu_info *, unsigned int);
+ void (*enable_pm)(struct iommu_info *);
+ void (*disable_pm)(struct iommu_info *);
+ void (*reset_counters)(const struct iommu_info *);
+ void (*check_for_overflow)(struct iommu_pmon *);
+ irqreturn_t (*evt_ovfl_int_handler)(int, void *);
+ void (*counter_enable)(struct iommu_info *,
+ struct iommu_pmon_counter *);
+ void (*counter_disable)(struct iommu_info *,
+ struct iommu_pmon_counter *);
+ void (*ovfl_int_enable)(struct iommu_info *,
+ const struct iommu_pmon_counter *);
+ void (*ovfl_int_disable)(struct iommu_info *,
+ const struct iommu_pmon_counter *);
+ void (*set_event_class)(struct iommu_pmon *pmon, unsigned int,
+ unsigned int);
+ unsigned int (*read_counter)(struct iommu_pmon_counter *);
+};
+
+#define MSM_IOMMU_PMU_NO_EVENT_CLASS -1
+
+#ifdef CONFIG_MSM_IOMMU_PMON
+
+/**
+ * Get pointer to PMU hardware access functions for IOMMUv0 PMU
+ */
+struct iommu_pm_hw_ops *iommu_pm_get_hw_ops_v0(void);
+
+/**
+ * Get pointer to PMU hardware access functions for IOMMUv1 PMU
+ */
+struct iommu_pm_hw_ops *iommu_pm_get_hw_ops_v1(void);
+
+/**
+ * Allocate memory for performance monitor structure. Must
+ * be called before iommu_pm_iommu_register
+ */
+struct iommu_pmon *msm_iommu_pm_alloc(struct device *iommu_dev);
+
+/**
+ * Free memory previously allocated with iommu_pm_alloc
+ */
+void msm_iommu_pm_free(struct device *iommu_dev);
+
+/**
+ * Register iommu with the performance monitor module.
+ */
+int msm_iommu_pm_iommu_register(struct iommu_pmon *info);
+
+/**
+ * Unregister iommu with the performance monitor module.
+ */
+void msm_iommu_pm_iommu_unregister(struct device *dev);
+
+/**
+ * Called by iommu driver when attaching is complete
+ * Must NOT be called with IOMMU mutexes held.
+ * @param iommu_dev IOMMU device that is attached
+ */
+void msm_iommu_attached(struct device *dev);
+
+/**
+ * Called by iommu driver before detaching.
+ * Must NOT be called with IOMMU mutexes held.
+ * @param iommu_dev IOMMU device that is going to be detached
+ */
+void msm_iommu_detached(struct device *dev);
+#else
+/*
+ * Stubs used when CONFIG_MSM_IOMMU_PMON is disabled: performance
+ * monitoring becomes a no-op (NULL ops, NULL pmon) and registration
+ * reports -EIO.
+ */
+static inline struct iommu_pm_hw_ops *iommu_pm_get_hw_ops_v0(void)
+{
+	return NULL;
+}
+
+static inline struct iommu_pm_hw_ops *iommu_pm_get_hw_ops_v1(void)
+{
+	return NULL;
+}
+
+static inline struct iommu_pmon *msm_iommu_pm_alloc(struct device *iommu_dev)
+{
+	return NULL;
+}
+
+static inline void msm_iommu_pm_free(struct device *iommu_dev)
+{
+	return;
+}
+
+static inline int msm_iommu_pm_iommu_register(struct iommu_pmon *info)
+{
+	return -EIO;
+}
+
+static inline void msm_iommu_pm_iommu_unregister(struct device *dev)
+{
+}
+
+static inline void msm_iommu_attached(struct device *dev)
+{
+}
+
+static inline void msm_iommu_detached(struct device *dev)
+{
+}
+#endif
+#endif
diff --git a/drivers/iommu/qcom/msm_iommu_priv.h b/drivers/iommu/qcom/msm_iommu_priv.h
new file mode 100644
index 0000000000000..4de0d7ef19e63
--- /dev/null
+++ b/drivers/iommu/qcom/msm_iommu_priv.h
@@ -0,0 +1,71 @@
+/* Copyright (c) 2013-2014, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef MSM_IOMMU_PRIV_H
+#define MSM_IOMMU_PRIV_H
+
+/**
+ * struct msm_iommu_pt - Container for first level page table and its
+ * attributes.
+ * fl_table: Pointer to the first level page table.
+ * redirect: Set to 1 if L2 redirect for page tables are enabled, 0 otherwise.
+ * unaligned_fl_table: Original address of memory for the page table.
+ * fl_table is manually aligned (as per spec) but we need the original address
+ * to free the table.
+ * fl_table_shadow: This is "copy" of the fl_table with some differences.
+ * It stores the same information as fl_table except that instead of storing
+ * second level page table address + page table entry descriptor bits it
+ * stores the second level page table address and the number of used second
+ * level page tables entries. This is used to check whether we need to free
+ * the second level page table which allows us to also free the second level
+ * page table after doing a TLB invalidate which should catch bugs with
+ * clients trying to unmap an address that is being used.
+ * fl_table_shadow will use the lower 9 bits for the use count and the upper
+ * bits for the second level page table address.
+ * sl_table_shadow uses the same concept as fl_table_shadow but for LPAE 2nd
+ * level page tables.
+ */
+#ifdef CONFIG_IOMMU_LPAE
+struct msm_iommu_pt {
+ u64 *fl_table;
+ u64 **sl_table_shadow;
+ int redirect;
+ u64 *unaligned_fl_table;
+};
+#else
+struct msm_iommu_pt {
+ u32 *fl_table;
+ int redirect;
+ u32 *fl_table_shadow;
+};
+#endif
+/**
+ * struct msm_iommu_priv - Container for page table attributes and other
+ * private iommu domain information.
+ *
+ * pt: Page table attribute structure
+ * list_attached: List of devices (contexts) attached to this domain.
+ * client_name: Name of the domain client.
+ */
+struct msm_iommu_priv {
+ struct msm_iommu_pt pt;
+ struct list_head list_attached;
+ struct iommu_domain domain;
+ const char *client_name;
+};
+
+/* Recover the msm_iommu_priv wrapping an embedded generic iommu_domain. */
+static inline struct msm_iommu_priv *to_msm_priv(struct iommu_domain *dom)
+{
+	return container_of(dom, struct msm_iommu_priv, domain);
+}
+
+#endif
diff --git a/drivers/iommu/qcom/msm_iommu_sec.c b/drivers/iommu/qcom/msm_iommu_sec.c
new file mode 100644
index 0000000000000..573245d4a57ef
--- /dev/null
+++ b/drivers/iommu/qcom/msm_iommu_sec.c
@@ -0,0 +1,795 @@
+/* Copyright (c) 2012-2015, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/errno.h>
+#include <linux/io.h>
+#include <linux/interrupt.h>
+#include <linux/list.h>
+#include <linux/mutex.h>
+#include <linux/slab.h>
+#include <linux/iommu.h>
+#include <linux/clk.h>
+#include <linux/scatterlist.h>
+#include <linux/of.h>
+#include <linux/of_device.h>
+#include <linux/kmemleak.h>
+#include <linux/dma-mapping.h>
+#include <linux/qcom_scm.h>
+
+#include <asm/cacheflush.h>
+#include <asm/sizes.h>
+
+#include "msm_iommu_perfmon.h"
+#include "msm_iommu_hw-v1.h"
+#include "msm_iommu_priv.h"
+#include <linux/qcom_iommu.h>
+#include <trace/events/kmem.h>
+
+/* bitmap of the page sizes currently supported */
+#define MSM_IOMMU_PGSIZES (SZ_4K | SZ_64K | SZ_1M | SZ_16M)
+
+/* commands for SCM_SVC_MP */
+#define IOMMU_SECURE_CFG 2
+#define IOMMU_SECURE_PTBL_SIZE 3
+#define IOMMU_SECURE_PTBL_INIT 4
+#define IOMMU_SET_CP_POOL_SIZE 5
+#define IOMMU_SECURE_MAP 6
+#define IOMMU_SECURE_UNMAP 7
+#define IOMMU_SECURE_MAP2 0x0B
+#define IOMMU_SECURE_MAP2_FLAT 0x12
+#define IOMMU_SECURE_UNMAP2 0x0C
+#define IOMMU_SECURE_UNMAP2_FLAT 0x13
+#define IOMMU_TLBINVAL_FLAG 0x00000001
+
+/* commands for SCM_SVC_UTIL */
+#define IOMMU_DUMP_SMMU_FAULT_REGS 0X0C
+#define SCM_SVC_MP 0xc
+
+static struct iommu_access_ops *iommu_access_ops;
+static int is_secure;
+
+static const struct of_device_id msm_smmu_list[] = {
+ { .compatible = "qcom,msm-smmu-v1", },
+ { .compatible = "qcom,msm-smmu-v2", },
+ { }
+};
+
+struct msm_scm_paddr_list {
+ unsigned int list;
+ unsigned int list_size;
+ unsigned int size;
+};
+
+struct msm_scm_mapping_info {
+ unsigned int id;
+ unsigned int ctx_id;
+ unsigned int va;
+ unsigned int size;
+};
+
+struct msm_scm_map2_req {
+ struct msm_scm_paddr_list plist;
+ struct msm_scm_mapping_info info;
+ unsigned int flags;
+};
+
+struct msm_scm_unmap2_req {
+ struct msm_scm_mapping_info info;
+ unsigned int flags;
+};
+
+struct msm_cp_pool_size {
+ uint32_t size;
+ uint32_t spare;
+};
+
+#define NUM_DUMP_REGS 14
+/*
+ * some space to allow the number of registers returned by the secure
+ * environment to grow
+ */
+#define WIGGLE_ROOM (NUM_DUMP_REGS * 2)
+/* Each entry is a (reg_addr, reg_val) pair, hence the * 2 */
+#define SEC_DUMP_SIZE ((NUM_DUMP_REGS * 2) + WIGGLE_ROOM)
+
+struct msm_scm_fault_regs_dump {
+ uint32_t dump_size;
+ uint32_t dump_data[SEC_DUMP_SIZE];
+} __aligned(PAGE_SIZE);
+
+void msm_iommu_sec_set_access_ops(struct iommu_access_ops *access_ops)
+{
+ iommu_access_ops = access_ops;
+}
+
+static int msm_iommu_dump_fault_regs(int smmu_id, int cb_num,
+ struct msm_scm_fault_regs_dump *regs)
+{
+ int ret;
+
+ dmac_clean_range(regs, regs + 1);
+
+ ret = qcom_scm_iommu_dump_fault_regs(smmu_id, cb_num,
+ virt_to_phys(regs), sizeof(*regs));
+
+ dmac_inv_range(regs, regs + 1);
+
+ return ret;
+}
+
+static int msm_iommu_reg_dump_to_regs(
+ struct msm_iommu_context_reg ctx_regs[],
+ struct msm_scm_fault_regs_dump *dump, struct msm_iommu_drvdata *drvdata,
+ struct msm_iommu_ctx_drvdata *ctx_drvdata)
+{
+ int i, j, ret = 0;
+ const uint32_t nvals = (dump->dump_size / sizeof(uint32_t));
+ uint32_t *it = (uint32_t *) dump->dump_data;
+ const uint32_t * const end = ((uint32_t *) dump) + nvals;
+ phys_addr_t phys_base = drvdata->phys_base;
+ int ctx = ctx_drvdata->num;
+
+ if (!nvals)
+ return -EINVAL;
+
+ for (i = 1; it < end; it += 2, i += 2) {
+ unsigned int reg_offset;
+ uint32_t addr = *it;
+ uint32_t val = *(it + 1);
+ struct msm_iommu_context_reg *reg = NULL;
+ if (addr < phys_base) {
+ pr_err("Bogus-looking register (0x%x) for Iommu with base at %pa. Skipping.\n",
+ addr, &phys_base);
+ continue;
+ }
+ reg_offset = addr - phys_base;
+
+ for (j = 0; j < MAX_DUMP_REGS; ++j) {
+ struct dump_regs_tbl_entry dump_reg = dump_regs_tbl[j];
+ void *test_reg;
+ unsigned int test_offset;
+ switch (dump_reg.dump_reg_type) {
+ case DRT_CTX_REG:
+ test_reg = CTX_REG(dump_reg.reg_offset,
+ drvdata->cb_base, ctx);
+ break;
+ case DRT_GLOBAL_REG:
+ test_reg = GLB_REG(
+ dump_reg.reg_offset, drvdata->glb_base);
+ break;
+ case DRT_GLOBAL_REG_N:
+ test_reg = GLB_REG_N(
+ drvdata->glb_base, ctx,
+ dump_reg.reg_offset);
+ break;
+ default:
+ pr_err("Unknown dump_reg_type: 0x%x\n",
+ dump_reg.dump_reg_type);
+ BUG();
+ break;
+ }
+ test_offset = test_reg - drvdata->glb_base;
+ if (test_offset == reg_offset) {
+ reg = &ctx_regs[j];
+ break;
+ }
+ }
+
+ if (reg == NULL) {
+ pr_debug("Unknown register in secure CB dump: %x\n",
+ addr);
+ continue;
+ }
+
+ if (reg->valid) {
+ WARN(1, "Invalid (repeated?) register in CB dump: %x\n",
+ addr);
+ continue;
+ }
+
+ reg->val = val;
+ reg->valid = true;
+ }
+
+ if (i != nvals) {
+ pr_err("Invalid dump! %d != %d\n", i, nvals);
+ ret = 1;
+ }
+
+ for (i = 0; i < MAX_DUMP_REGS; ++i) {
+ if (!ctx_regs[i].valid) {
+ if (dump_regs_tbl[i].must_be_present) {
+ pr_err("Register missing from dump for ctx %d: %s, 0x%x\n",
+ ctx,
+ dump_regs_tbl[i].name,
+ dump_regs_tbl[i].reg_offset);
+ ret = 1;
+ }
+ ctx_regs[i].val = 0xd00dfeed;
+ }
+ }
+
+ return ret;
+}
+
+irqreturn_t msm_iommu_secure_fault_handler_v2(int irq, void *dev_id)
+{
+ struct platform_device *pdev = dev_id;
+ struct msm_iommu_drvdata *drvdata;
+ struct msm_iommu_ctx_drvdata *ctx_drvdata;
+ struct msm_scm_fault_regs_dump *regs;
+ int tmp, ret = IRQ_HANDLED;
+
+ iommu_access_ops->iommu_lock_acquire(0);
+
+ BUG_ON(!pdev);
+
+ drvdata = dev_get_drvdata(pdev->dev.parent);
+ BUG_ON(!drvdata);
+
+ ctx_drvdata = dev_get_drvdata(&pdev->dev);
+ BUG_ON(!ctx_drvdata);
+
+ regs = kzalloc(sizeof(*regs), GFP_KERNEL);
+ if (!regs) {
+ pr_err("%s: Couldn't allocate memory\n", __func__);
+ goto lock_release;
+ }
+
+ if (!drvdata->ctx_attach_count) {
+ pr_err("Unexpected IOMMU page fault from secure context bank!\n");
+ pr_err("name = %s\n", drvdata->name);
+ pr_err("Power is OFF. Unable to read page fault information\n");
+ /*
+ * We cannot determine which context bank caused the issue so
+ * we just return handled here to ensure IRQ handler code is
+ * happy
+ */
+ goto free_regs;
+ }
+
+ iommu_access_ops->iommu_clk_on(drvdata);
+ tmp = msm_iommu_dump_fault_regs(drvdata->sec_id,
+ ctx_drvdata->num, regs);
+ iommu_access_ops->iommu_clk_off(drvdata);
+
+ if (tmp) {
+ pr_err("%s: Couldn't dump fault registers (%d) %s, ctx: %d\n",
+ __func__, tmp, drvdata->name, ctx_drvdata->num);
+ goto free_regs;
+ } else {
+ struct msm_iommu_context_reg ctx_regs[MAX_DUMP_REGS];
+ memset(ctx_regs, 0, sizeof(ctx_regs));
+ tmp = msm_iommu_reg_dump_to_regs(
+ ctx_regs, regs, drvdata, ctx_drvdata);
+ if (tmp < 0) {
+ ret = IRQ_NONE;
+ pr_err("Incorrect response from secure environment\n");
+ goto free_regs;
+ }
+
+ if (ctx_regs[DUMP_REG_FSR].val) {
+ if (tmp)
+ pr_err("Incomplete fault register dump. Printout will be incomplete.\n");
+ if (!ctx_drvdata->attached_domain) {
+ pr_err("Bad domain in interrupt handler\n");
+ tmp = -ENOSYS;
+ } else {
+ tmp = report_iommu_fault(
+ ctx_drvdata->attached_domain,
+ &ctx_drvdata->pdev->dev,
+ COMBINE_DUMP_REG(
+ ctx_regs[DUMP_REG_FAR1].val,
+ ctx_regs[DUMP_REG_FAR0].val),
+ 0);
+ }
+
+ /* if the fault wasn't handled by someone else: */
+ if (tmp == -ENOSYS) {
+ pr_err("Unexpected IOMMU page fault from secure context bank!\n");
+ pr_err("name = %s\n", drvdata->name);
+ pr_err("context = %s (%d)\n", ctx_drvdata->name,
+ ctx_drvdata->num);
+ pr_err("Interesting registers:\n");
+ print_ctx_regs(ctx_regs);
+ }
+ } else {
+ ret = IRQ_NONE;
+ }
+ }
+free_regs:
+ kfree(regs);
+lock_release:
+ iommu_access_ops->iommu_lock_release(0);
+ return ret;
+}
+
+int msm_iommu_sec_program_iommu(struct msm_iommu_drvdata *drvdata,
+ struct msm_iommu_ctx_drvdata *ctx_drvdata)
+{
+ if (drvdata->smmu_local_base) {
+ writel_relaxed(0xFFFFFFFF,
+ drvdata->smmu_local_base + SMMU_INTR_SEL_NS);
+ mb();
+ }
+
+ return qcom_scm_restore_sec_cfg(drvdata->sec_id, ctx_drvdata->num);
+}
+
+static int msm_iommu_sec_map2(struct msm_scm_map2_req *map)
+{
+ u32 flags;
+
+#ifdef CONFIG_MSM_IOMMU_TLBINVAL_ON_MAP
+ flags = IOMMU_TLBINVAL_FLAG;
+#else
+ flags = 0;
+#endif
+
+ return qcom_scm_iommu_secure_map(map->plist.list,
+ map->plist.list_size,
+ map->plist.size,
+ map->info.id,
+ map->info.ctx_id,
+ map->info.va,
+ map->info.size,
+ flags);
+}
+
+static int msm_iommu_sec_ptbl_map(struct msm_iommu_drvdata *iommu_drvdata,
+ struct msm_iommu_ctx_drvdata *ctx_drvdata,
+ unsigned long va, phys_addr_t pa, size_t len)
+{
+ struct msm_scm_map2_req map;
+ void *flush_va, *flush_va_end;
+ int ret = 0;
+
+ if (!IS_ALIGNED(va, SZ_1M) || !IS_ALIGNED(len, SZ_1M) ||
+ !IS_ALIGNED(pa, SZ_1M))
+ return -EINVAL;
+ map.plist.list = virt_to_phys(&pa);
+ map.plist.list_size = 1;
+ map.plist.size = len;
+ map.info.id = iommu_drvdata->sec_id;
+ map.info.ctx_id = ctx_drvdata->num;
+ map.info.va = va;
+ map.info.size = len;
+
+ flush_va = &pa;
+ flush_va_end = (void *)
+ (((unsigned long) flush_va) + sizeof(phys_addr_t));
+
+ /*
+ * Ensure that the buffer is in RAM by the time it gets to TZ
+ */
+ dmac_clean_range(flush_va, flush_va_end);
+
+ ret = msm_iommu_sec_map2(&map);
+ if (ret)
+ return -EINVAL;
+
+ return 0;
+}
+
+static unsigned int get_phys_addr(struct scatterlist *sg)
+{
+ /*
+ * Try sg_dma_address first so that we can
+ * map carveout regions that do not have a
+ * struct page associated with them.
+ */
+ unsigned int pa = sg_dma_address(sg);
+ if (pa == 0)
+ pa = sg_phys(sg);
+ return pa;
+}
+
+static int msm_iommu_sec_ptbl_map_range(struct msm_iommu_drvdata *iommu_drvdata,
+ struct msm_iommu_ctx_drvdata *ctx_drvdata,
+ unsigned long va, struct scatterlist *sg, size_t len)
+{
+ struct scatterlist *sgiter;
+ struct msm_scm_map2_req map;
+ unsigned int *pa_list = 0;
+ unsigned int pa, cnt;
+ void *flush_va, *flush_va_end;
+ unsigned int offset = 0, chunk_offset = 0;
+ int ret;
+
+ if (!IS_ALIGNED(va, SZ_1M) || !IS_ALIGNED(len, SZ_1M))
+ return -EINVAL;
+
+ map.info.id = iommu_drvdata->sec_id;
+ map.info.ctx_id = ctx_drvdata->num;
+ map.info.va = va;
+ map.info.size = len;
+
+ if (sg->length == len) {
+ /*
+ * physical address for secure mapping needs
+ * to be 1MB aligned
+ */
+ pa = get_phys_addr(sg);
+ if (!IS_ALIGNED(pa, SZ_1M))
+ return -EINVAL;
+ map.plist.list = virt_to_phys(&pa);
+ map.plist.list_size = 1;
+ map.plist.size = len;
+ flush_va = &pa;
+ } else {
+ sgiter = sg;
+ if (!IS_ALIGNED(sgiter->length, SZ_1M))
+ return -EINVAL;
+ cnt = sg->length / SZ_1M;
+ while ((sgiter = sg_next(sgiter))) {
+ if (!IS_ALIGNED(sgiter->length, SZ_1M))
+ return -EINVAL;
+ cnt += sgiter->length / SZ_1M;
+ }
+
+ pa_list = kmalloc(cnt * sizeof(*pa_list), GFP_KERNEL);
+ if (!pa_list)
+ return -ENOMEM;
+
+ sgiter = sg;
+ cnt = 0;
+ pa = get_phys_addr(sgiter);
+ if (!IS_ALIGNED(pa, SZ_1M)) {
+ kfree(pa_list);
+ return -EINVAL;
+ }
+ while (offset < len) {
+ pa += chunk_offset;
+ pa_list[cnt] = pa;
+ chunk_offset += SZ_1M;
+ offset += SZ_1M;
+ cnt++;
+
+ if (chunk_offset >= sgiter->length && offset < len) {
+ chunk_offset = 0;
+ sgiter = sg_next(sgiter);
+ pa = get_phys_addr(sgiter);
+ }
+ }
+
+ map.plist.list = virt_to_phys(pa_list);
+ map.plist.list_size = cnt;
+ map.plist.size = SZ_1M;
+ flush_va = pa_list;
+ }
+
+ /*
+ * Ensure that the buffer is in RAM by the time it gets to TZ
+ */
+ flush_va_end = (void *) (((unsigned long) flush_va) +
+ (map.plist.list_size * sizeof(*pa_list)));
+ dmac_clean_range(flush_va, flush_va_end);
+
+ ret = msm_iommu_sec_map2(&map);
+ kfree(pa_list);
+
+ return ret;
+}
+
+static int msm_iommu_sec_ptbl_unmap(struct msm_iommu_drvdata *iommu_drvdata,
+ struct msm_iommu_ctx_drvdata *ctx_drvdata,
+ unsigned long va, size_t len)
+{
+ if (!IS_ALIGNED(va, SZ_1M) || !IS_ALIGNED(len, SZ_1M))
+ return -EINVAL;
+
+ return qcom_scm_iommu_secure_unmap(iommu_drvdata->sec_id,
+ ctx_drvdata->num,
+ va,
+ len,
+ IOMMU_TLBINVAL_FLAG);
+}
+
+static struct iommu_domain * msm_iommu_domain_alloc(unsigned type)
+{
+ struct msm_iommu_priv *priv;
+ struct iommu_domain *domain;
+
+ if (type != IOMMU_DOMAIN_UNMANAGED)
+ return NULL;
+
+ priv = kzalloc(sizeof(*priv), GFP_KERNEL);
+ if (!priv)
+ return NULL;
+
+ INIT_LIST_HEAD(&priv->list_attached);
+ domain = &priv->domain;
+ return domain;
+}
+
+static void msm_iommu_domain_free(struct iommu_domain *domain)
+{
+ struct msm_iommu_priv *priv;
+
+ iommu_access_ops->iommu_lock_acquire(0);
+ priv = to_msm_priv(domain);
+
+ kfree(priv);
+ iommu_access_ops->iommu_lock_release(0);
+}
+
+static int msm_iommu_attach_dev(struct iommu_domain *domain, struct device *dev)
+{
+ struct msm_iommu_priv *priv;
+ struct msm_iommu_drvdata *iommu_drvdata;
+ struct msm_iommu_ctx_drvdata *ctx_drvdata;
+ struct msm_iommu_ctx_drvdata *tmp_drvdata;
+ int ret = 0;
+
+ iommu_access_ops->iommu_lock_acquire(0);
+
+ priv = to_msm_priv(domain);
+ if (!priv || !dev) {
+ ret = -EINVAL;
+ goto fail;
+ }
+
+ iommu_drvdata = dev_get_drvdata(dev->parent);
+ ctx_drvdata = dev_get_drvdata(dev);
+ if (!iommu_drvdata || !ctx_drvdata) {
+ ret = -EINVAL;
+ goto fail;
+ }
+
+ if (!list_empty(&ctx_drvdata->attached_elm)) {
+ ret = -EBUSY;
+ goto fail;
+ }
+
+ list_for_each_entry(tmp_drvdata, &priv->list_attached, attached_elm)
+ if (tmp_drvdata == ctx_drvdata) {
+ ret = -EBUSY;
+ goto fail;
+ }
+
+ ret = iommu_access_ops->iommu_power_on(iommu_drvdata);
+ if (ret)
+ goto fail;
+
+ /* We can only do this once */
+ if (!iommu_drvdata->ctx_attach_count) {
+ ret = iommu_access_ops->iommu_clk_on(iommu_drvdata);
+ if (ret) {
+ iommu_access_ops->iommu_power_off(iommu_drvdata);
+ goto fail;
+ }
+
+ ret = msm_iommu_sec_program_iommu(iommu_drvdata,
+ ctx_drvdata);
+
+ /* bfb settings are always programmed by HLOS */
+ program_iommu_bfb_settings(iommu_drvdata->base,
+ iommu_drvdata->bfb_settings);
+
+ iommu_access_ops->iommu_clk_off(iommu_drvdata);
+ if (ret) {
+ iommu_access_ops->iommu_power_off(iommu_drvdata);
+ goto fail;
+ }
+ }
+
+ list_add(&(ctx_drvdata->attached_elm), &priv->list_attached);
+ ctx_drvdata->attached_domain = domain;
+ ++iommu_drvdata->ctx_attach_count;
+
+ iommu_access_ops->iommu_lock_release(0);
+
+ msm_iommu_attached(dev->parent);
+ return ret;
+fail:
+ iommu_access_ops->iommu_lock_release(0);
+ return ret;
+}
+
+static void msm_iommu_detach_dev(struct iommu_domain *domain,
+ struct device *dev)
+{
+ struct msm_iommu_drvdata *iommu_drvdata;
+ struct msm_iommu_ctx_drvdata *ctx_drvdata;
+
+ if (!dev)
+ return;
+
+ msm_iommu_detached(dev->parent);
+
+ iommu_access_ops->iommu_lock_acquire(0);
+
+ iommu_drvdata = dev_get_drvdata(dev->parent);
+ ctx_drvdata = dev_get_drvdata(dev);
+ if (!iommu_drvdata || !ctx_drvdata || !ctx_drvdata->attached_domain)
+ goto fail;
+
+ list_del_init(&ctx_drvdata->attached_elm);
+ ctx_drvdata->attached_domain = NULL;
+
+ iommu_access_ops->iommu_power_off(iommu_drvdata);
+ BUG_ON(iommu_drvdata->ctx_attach_count == 0);
+ --iommu_drvdata->ctx_attach_count;
+fail:
+ iommu_access_ops->iommu_lock_release(0);
+}
+
+static int get_drvdata(struct iommu_domain *domain,
+ struct msm_iommu_drvdata **iommu_drvdata,
+ struct msm_iommu_ctx_drvdata **ctx_drvdata)
+{
+ struct msm_iommu_priv *priv = to_msm_priv(domain);
+ struct msm_iommu_ctx_drvdata *ctx;
+
+ list_for_each_entry(ctx, &priv->list_attached, attached_elm) {
+ if (ctx->attached_domain == domain)
+ break;
+ }
+
+ if (ctx->attached_domain != domain)
+ return -EINVAL;
+
+ *ctx_drvdata = ctx;
+ *iommu_drvdata = dev_get_drvdata(ctx->pdev->dev.parent);
+ return 0;
+}
+
+static int msm_iommu_map(struct iommu_domain *domain, unsigned long va,
+ phys_addr_t pa, size_t len, int prot)
+{
+ struct msm_iommu_drvdata *iommu_drvdata;
+ struct msm_iommu_ctx_drvdata *ctx_drvdata;
+ int ret = 0;
+
+ iommu_access_ops->iommu_lock_acquire(0);
+
+ ret = get_drvdata(domain, &iommu_drvdata, &ctx_drvdata);
+ if (ret)
+ goto fail;
+
+ iommu_access_ops->iommu_clk_on(iommu_drvdata);
+ ret = msm_iommu_sec_ptbl_map(iommu_drvdata, ctx_drvdata,
+ va, pa, len);
+ iommu_access_ops->iommu_clk_off(iommu_drvdata);
+fail:
+ iommu_access_ops->iommu_lock_release(0);
+ return ret;
+}
+
+static size_t msm_iommu_unmap(struct iommu_domain *domain, unsigned long va,
+ size_t len)
+{
+ struct msm_iommu_drvdata *iommu_drvdata;
+ struct msm_iommu_ctx_drvdata *ctx_drvdata;
+ int ret = -ENODEV;
+
+ iommu_access_ops->iommu_lock_acquire(0);
+
+ ret = get_drvdata(domain, &iommu_drvdata, &ctx_drvdata);
+ if (ret)
+ goto fail;
+
+ iommu_access_ops->iommu_clk_on(iommu_drvdata);
+ ret = msm_iommu_sec_ptbl_unmap(iommu_drvdata, ctx_drvdata,
+ va, len);
+ iommu_access_ops->iommu_clk_off(iommu_drvdata);
+fail:
+ iommu_access_ops->iommu_lock_release(0);
+
+ /* the IOMMU API requires us to return how many bytes were unmapped */
+ len = ret ? 0 : len;
+ return len;
+}
+
+static int msm_iommu_map_range(struct iommu_domain *domain, unsigned int va,
+ struct scatterlist *sg, unsigned int len,
+ int prot)
+{
+ int ret;
+ struct msm_iommu_drvdata *iommu_drvdata;
+ struct msm_iommu_ctx_drvdata *ctx_drvdata;
+
+ iommu_access_ops->iommu_lock_acquire(0);
+
+ ret = get_drvdata(domain, &iommu_drvdata, &ctx_drvdata);
+ if (ret)
+ goto fail;
+ iommu_access_ops->iommu_clk_on(iommu_drvdata);
+ ret = msm_iommu_sec_ptbl_map_range(iommu_drvdata, ctx_drvdata,
+ va, sg, len);
+ iommu_access_ops->iommu_clk_off(iommu_drvdata);
+fail:
+ iommu_access_ops->iommu_lock_release(0);
+ return ret;
+}
+
+
+static int msm_iommu_unmap_range(struct iommu_domain *domain, unsigned int va,
+ unsigned int len)
+{
+ struct msm_iommu_drvdata *iommu_drvdata;
+ struct msm_iommu_ctx_drvdata *ctx_drvdata;
+ int ret = -EINVAL;
+
+ if (!IS_ALIGNED(va, SZ_1M) || !IS_ALIGNED(len, SZ_1M))
+ return -EINVAL;
+
+ iommu_access_ops->iommu_lock_acquire(0);
+
+ ret = get_drvdata(domain, &iommu_drvdata, &ctx_drvdata);
+ if (ret)
+ goto fail;
+
+ iommu_access_ops->iommu_clk_on(iommu_drvdata);
+ ret = msm_iommu_sec_ptbl_unmap(iommu_drvdata, ctx_drvdata, va, len);
+ iommu_access_ops->iommu_clk_off(iommu_drvdata);
+
+fail:
+ iommu_access_ops->iommu_lock_release(0);
+ return ret ? ret : 0;
+}
+
+static phys_addr_t msm_iommu_iova_to_phys(struct iommu_domain *domain,
+ phys_addr_t va)
+{
+ return 0;
+}
+
+void msm_iommu_check_scm_call_avail(void)
+{
+ is_secure = qcom_scm_is_call_available(SCM_SVC_MP, IOMMU_SECURE_CFG);
+}
+
+int msm_iommu_get_scm_call_avail(void)
+{
+ return is_secure;
+}
+
+static struct iommu_ops msm_iommu_ops = {
+ .domain_alloc = msm_iommu_domain_alloc,
+ .domain_free = msm_iommu_domain_free,
+ .attach_dev = msm_iommu_attach_dev,
+ .detach_dev = msm_iommu_detach_dev,
+ .map = msm_iommu_map,
+ .unmap = msm_iommu_unmap,
+/* .map_range = msm_iommu_map_range,*/
+ .map_sg = default_iommu_map_sg,
+/* .unmap_range = msm_iommu_unmap_range,*/
+ .iova_to_phys = msm_iommu_iova_to_phys,
+ .pgsize_bitmap = MSM_IOMMU_PGSIZES,
+};
+
+static int __init msm_iommu_sec_init(void)
+{
+ int ret;
+
+ ret = bus_register(&msm_iommu_sec_bus_type);
+ if (ret)
+ return ret;
+
+ ret = bus_set_iommu(&msm_iommu_sec_bus_type, &msm_iommu_ops);
+ if (ret) {
+ bus_unregister(&msm_iommu_sec_bus_type);
+ return ret;
+ }
+
+ return 0;
+}
+
+subsys_initcall(msm_iommu_sec_init);
+
+MODULE_LICENSE("GPL v2");
+MODULE_DESCRIPTION("MSM SMMU Secure Driver");
diff --git a/drivers/of/address.c b/drivers/of/address.c
index 02b2903fe9d27..6aeb81643f47d 100644
--- a/drivers/of/address.c
+++ b/drivers/of/address.c
@@ -819,8 +819,8 @@ EXPORT_SYMBOL(of_io_request_and_map);
* CPU addr (phys_addr_t) : pna cells
* size : nsize cells
*
- * It returns -ENODEV if "dma-ranges" property was not found
- * for this device in DT.
+ * Return 0 on success, -ENODEV if the "dma-ranges" property was not found for
+ * this device in DT, or -EINVAL if the CPU address or size is invalid.
*/
int of_dma_get_range(struct device_node *np, u64 *dma_addr, u64 *paddr, u64 *size)
{
@@ -880,6 +880,22 @@ int of_dma_get_range(struct device_node *np, u64 *dma_addr, u64 *paddr, u64 *siz
*dma_addr = dmaaddr;
*size = of_read_number(ranges + naddr + pna, nsize);
+ /*
+ * DT nodes sometimes incorrectly set the size as a mask. Work around
+ * those incorrect DT by computing the size as mask + 1.
+ */
+ if (*size & 1) {
+ pr_warn("%s: size 0x%llx for dma-range in node(%s) set as mask\n",
+ __func__, *size, np->full_name);
+ *size = *size + 1;
+ }
+
+ if (!*size) {
+ pr_err("%s: invalid size zero for dma-range in node(%s)\n",
+ __func__, np->full_name);
+ ret = -EINVAL;
+ goto out;
+ }
pr_debug("dma_addr(%llx) cpu_addr(%llx) size(%llx)\n",
*dma_addr, *paddr, *size);
diff --git a/drivers/of/device.c b/drivers/of/device.c
index fd5cfad7c4030..92e02dc1f4983 100644
--- a/drivers/of/device.c
+++ b/drivers/of/device.c
@@ -71,7 +71,7 @@ int of_device_add(struct platform_device *ofdev)
}
/**
- * of_dma_configure - Setup DMA configuration
+ * of_dma_configure - Setup DMA masks and offset
* @dev: Device to apply DMA configuration
* @np: Pointer to OF node having DMA configuration
*
@@ -82,13 +82,11 @@ int of_device_add(struct platform_device *ofdev)
* can use a platform bus notifier and handle BUS_NOTIFY_ADD_DEVICE events
* to fix up DMA configuration.
*/
-void of_dma_configure(struct device *dev, struct device_node *np)
+void of_dma_configure_masks(struct device *dev, struct device_node *np)
{
- u64 dma_addr, paddr, size;
- int ret;
- bool coherent;
+ u64 dma_addr, paddr, size, range_mask;
unsigned long offset;
- const struct iommu_ops *iommu;
+ int ret;
/*
* Set default coherent_dma_mask to 32 bit. Drivers are expected to
@@ -106,25 +104,11 @@ void of_dma_configure(struct device *dev, struct device_node *np)
ret = of_dma_get_range(np, &dma_addr, &paddr, &size);
if (ret < 0) {
- dma_addr = offset = 0;
- size = dev->coherent_dma_mask + 1;
+ range_mask = dev->coherent_dma_mask + 1;
+ offset = 0;
} else {
+ range_mask = DMA_BIT_MASK(ilog2(dma_addr + size));
offset = PFN_DOWN(paddr - dma_addr);
-
- /*
- * Add a work around to treat the size as mask + 1 in case
- * it is defined in DT as a mask.
- */
- if (size & 1) {
- dev_warn(dev, "Invalid size 0x%llx for dma-range\n",
- size);
- size = size + 1;
- }
-
- if (!size) {
- dev_err(dev, "Adjusted size 0x%llx invalid\n", size);
- return;
- }
dev_dbg(dev, "dma_pfn_offset(%#08lx)\n", offset);
}
@@ -134,22 +118,59 @@ void of_dma_configure(struct device *dev, struct device_node *np)
* Limit coherent and dma mask based on size and default mask
* set by the driver.
*/
- dev->coherent_dma_mask = min(dev->coherent_dma_mask,
- DMA_BIT_MASK(ilog2(dma_addr + size)));
- *dev->dma_mask = min((*dev->dma_mask),
- DMA_BIT_MASK(ilog2(dma_addr + size)));
+ dev->coherent_dma_mask = min(dev->coherent_dma_mask, range_mask);
+ *dev->dma_mask = min((*dev->dma_mask), range_mask);
+}
+EXPORT_SYMBOL_GPL(of_dma_configure_masks);
+
+/**
+ * of_dma_configure_ops - Setup DMA operations
+ * @dev: Device to apply DMA configuration
+ * @np: Pointer to OF node having DMA configuration
+ *
+ * Try to get the device's DMA configuration from DT and update it
+ * accordingly.
+ */
+int of_dma_configure_ops(struct device *dev, struct device_node *np)
+{
+ u64 dma_addr, paddr, size;
+ const struct iommu_ops *iommu;
+ bool coherent;
+ int ret;
+
+ ret = of_dma_get_range(np, &dma_addr, &paddr, &size);
+ if (ret < 0) {
+ dma_addr = 0;
+ size = dev->coherent_dma_mask + 1;
+ }
coherent = of_dma_is_coherent(np);
dev_dbg(dev, "device is%sdma coherent\n",
coherent ? " " : " not ");
iommu = of_iommu_configure(dev, np);
+ if (IS_ERR(iommu))
+ return PTR_ERR(iommu);
dev_dbg(dev, "device is%sbehind an iommu\n",
iommu ? " " : " not ");
arch_setup_dma_ops(dev, dma_addr, size, iommu, coherent);
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(of_dma_configure_ops);
+
+/**
+ * of_dma_deconfigure - Clean up DMA configuration
+ * @dev: Device for which to clean up DMA configuration
+ *
+ * Clean up all configuration performed by of_dma_configure_ops() and free all
+ * resources that have been allocated.
+ */
+void of_dma_deconfigure(struct device *dev)
+{
+ arch_teardown_dma_ops(dev);
}
-EXPORT_SYMBOL_GPL(of_dma_configure);
int of_device_register(struct platform_device *pdev)
{
diff --git a/drivers/of/irq.c b/drivers/of/irq.c
index a2e68f740edac..38521a3ef68b3 100644
--- a/drivers/of/irq.c
+++ b/drivers/of/irq.c
@@ -26,6 +26,7 @@
#include <linux/module.h>
#include <linux/of.h>
#include <linux/of_irq.h>
+#include <linux/of_pci.h>
#include <linux/string.h>
#include <linux/slab.h>
@@ -592,13 +593,7 @@ static u32 __of_msi_map_rid(struct device *dev, struct device_node **np,
u32 rid_in)
{
struct device *parent_dev;
- struct device_node *msi_controller_node;
- struct device_node *msi_np = *np;
- u32 map_mask, masked_rid, rid_base, msi_base, rid_len, phandle;
- int msi_map_len;
- bool matched;
u32 rid_out = rid_in;
- const __be32 *msi_map = NULL;
/*
* Walk up the device parent links looking for one with a
@@ -608,71 +603,14 @@ static u32 __of_msi_map_rid(struct device *dev, struct device_node **np,
if (!parent_dev->of_node)
continue;
- msi_map = of_get_property(parent_dev->of_node,
- "msi-map", &msi_map_len);
- if (!msi_map)
+ if (!of_property_read_bool(parent_dev->of_node, "msi-map"))
continue;
- if (msi_map_len % (4 * sizeof(__be32))) {
- dev_err(parent_dev, "Error: Bad msi-map length: %d\n",
- msi_map_len);
- return rid_out;
- }
/* We have a good parent_dev and msi_map, let's use them. */
+ of_pci_map_rid(parent_dev->of_node, "msi-map", rid_in, np,
+ &rid_out);
break;
}
- if (!msi_map)
- return rid_out;
-
- /* The default is to select all bits. */
- map_mask = 0xffffffff;
-
- /*
- * Can be overridden by "msi-map-mask" property. If
- * of_property_read_u32() fails, the default is used.
- */
- of_property_read_u32(parent_dev->of_node, "msi-map-mask", &map_mask);
-
- masked_rid = map_mask & rid_in;
- matched = false;
- while (!matched && msi_map_len >= 4 * sizeof(__be32)) {
- rid_base = be32_to_cpup(msi_map + 0);
- phandle = be32_to_cpup(msi_map + 1);
- msi_base = be32_to_cpup(msi_map + 2);
- rid_len = be32_to_cpup(msi_map + 3);
-
- if (rid_base & ~map_mask) {
- dev_err(parent_dev,
- "Invalid msi-map translation - msi-map-mask (0x%x) ignores rid-base (0x%x)\n",
- map_mask, rid_base);
- return rid_out;
- }
-
- msi_controller_node = of_find_node_by_phandle(phandle);
-
- matched = (masked_rid >= rid_base &&
- masked_rid < rid_base + rid_len);
- if (msi_np)
- matched &= msi_np == msi_controller_node;
-
- if (matched && !msi_np) {
- *np = msi_np = msi_controller_node;
- break;
- }
-
- of_node_put(msi_controller_node);
- msi_map_len -= 4 * sizeof(__be32);
- msi_map += 4;
- }
- if (!matched)
- return rid_out;
-
- rid_out = masked_rid - rid_base + msi_base;
- dev_dbg(dev,
- "msi-map at: %s, using mask %08x, rid-base: %08x, msi-base: %08x, length: %08x, rid: %08x -> %08x\n",
- dev_name(parent_dev), map_mask, rid_base, msi_base,
- rid_len, rid_in, rid_out);
-
return rid_out;
}
diff --git a/drivers/of/of_pci.c b/drivers/of/of_pci.c
index 589b30c68e142..333ac0ddc63f0 100644
--- a/drivers/of/of_pci.c
+++ b/drivers/of/of_pci.c
@@ -308,3 +308,105 @@ struct msi_controller *of_pci_find_msi_chip_by_node(struct device_node *of_node)
EXPORT_SYMBOL_GPL(of_pci_find_msi_chip_by_node);
#endif /* CONFIG_PCI_MSI */
+
+#define MASK_NAME_LEN 32 /* Safely longer than "iommu-map-mask" */
+
+/**
+ * of_pci_map_rid - Translate a requester ID through a downstream mapping.
+ * @np: root complex device node.
+ * @map_name: property name of the map to use.
+ * @target: optional pointer to a target device node.
+ * @rid_out: optional pointer to receive the translated ID.
+ *
+ * Given a PCI requester ID, look up the appropriate implementation-defined
+ * platform ID and/or the target device which receives transactions on that
+ * ID, as per the "iommu-map" and "msi-map" bindings. @target or @rid_out may
+ * be NULL if not required. If @target points to a device node pointer, only
+ * entries targeting that node will be matched; if it points to a NULL
+ * value, it will receive the device node for the first matching target entry,
+ * with a reference held.
+ *
+ * Return: 0 on success or a standard error code on failure.
+ */
+int of_pci_map_rid(struct device_node *np, const char *map_name, u32 rid_in,
+ struct device_node **target, u32 *rid_out)
+{
+ u32 map_mask, masked_rid;
+ int map_len;
+ const __be32 *map = NULL;
+ char mask_name[MASK_NAME_LEN];
+
+ if (!np || !map_name || (!target && !rid_out))
+ return -EINVAL;
+
+ map = of_get_property(np, map_name, &map_len);
+ if (!map) {
+ if (target)
+ return -ENODEV;
+ /* Otherwise, no map implies no translation */
+ *rid_out = rid_in;
+ return 0;
+ }
+
+ if (!map_len || map_len % (4 * sizeof(*map))) {
+ pr_err("%s: Error: Bad %s length: %d\n", np->full_name,
+ map_name, map_len);
+ return -EINVAL;
+ }
+
+ /* The default is to select all bits. */
+ map_mask = 0xffffffff;
+
+ /*
+ * Can be overridden by "{iommu,msi}-map-mask" property.
+ * If of_property_read_u32() fails, the default is used.
+ */
+ snprintf(mask_name, MASK_NAME_LEN, "%s-mask", map_name);
+ of_property_read_u32(np, mask_name, &map_mask);
+
+ masked_rid = map_mask & rid_in;
+ for ( ; map_len > 0; map_len -= 4 * sizeof(*map), map += 4) {
+ struct device_node *phandle_node;
+ u32 rid_base = be32_to_cpup(map + 0);
+ u32 phandle = be32_to_cpup(map + 1);
+ u32 out_base = be32_to_cpup(map + 2);
+ u32 rid_len = be32_to_cpup(map + 3);
+
+ if (rid_base & ~map_mask) {
+ pr_err("%s: Invalid %s translation - %s-mask (0x%x) ignores rid-base (0x%x)\n",
+ np->full_name, map_name, map_name,
+ map_mask, rid_base);
+ return -EINVAL;
+ }
+
+ if (masked_rid < rid_base || masked_rid >= rid_base + rid_len)
+ continue;
+
+ phandle_node = of_find_node_by_phandle(phandle);
+ if (!phandle_node)
+ return -ENODEV;
+
+ if (target) {
+ if (*target)
+ of_node_put(phandle_node);
+ else
+ *target = phandle_node;
+
+ if (*target != phandle_node)
+ continue;
+ }
+
+ if (rid_out)
+ *rid_out = masked_rid - rid_base + out_base;
+
+ pr_debug("%s: %s, using mask %08x, rid-base: %08x, out-base: %08x, length: %08x, rid: %08x -> %08x\n",
+ np->full_name, map_name, map_mask, rid_base, out_base,
+ rid_len, rid_in, *rid_out);
+ return 0;
+ }
+
+ pr_err("%s: Invalid %s translation - no match for rid 0x%x on %s\n",
+ np->full_name, map_name, rid_in,
+ target && *target ? (*target)->full_name : "any target");
+ return -EINVAL;
+}
diff --git a/drivers/of/platform.c b/drivers/of/platform.c
index f39ccd5aa7012..07f4baabc1ece 100644
--- a/drivers/of/platform.c
+++ b/drivers/of/platform.c
@@ -153,11 +153,6 @@ struct platform_device *of_device_alloc(struct device_node *np,
}
EXPORT_SYMBOL(of_device_alloc);
-static void of_dma_deconfigure(struct device *dev)
-{
- arch_teardown_dma_ops(dev);
-}
-
/**
* of_platform_device_create_pdata - Alloc, initialize and register an of_device
* @np: pointer to node to create device for
@@ -186,11 +181,10 @@ static struct platform_device *of_platform_device_create_pdata(
dev->dev.bus = &platform_bus_type;
dev->dev.platform_data = platform_data;
- of_dma_configure(&dev->dev, dev->dev.of_node);
+ of_dma_configure_masks(&dev->dev, dev->dev.of_node);
of_msi_configure(&dev->dev, dev->dev.of_node);
if (of_device_add(dev) != 0) {
- of_dma_deconfigure(&dev->dev);
platform_device_put(dev);
goto err_clear_flag;
}
@@ -247,7 +241,8 @@ static struct amba_device *of_amba_device_create(struct device_node *node,
dev_set_name(&dev->dev, "%s", bus_id);
else
of_device_make_bus_id(&dev->dev);
- of_dma_configure(&dev->dev, dev->dev.of_node);
+ of_dma_configure_masks(&dev->dev, dev->dev.of_node);
+ of_dma_configure_ops(&dev->dev, dev->dev.of_node);
/* Allow the HW Peripheral ID to be overridden */
prop = of_get_property(node, "arm,primecell-periphid", NULL);
@@ -537,11 +532,12 @@ static int of_platform_device_destroy(struct device *dev, void *data)
if (dev->bus == &platform_bus_type)
platform_device_unregister(to_platform_device(dev));
#ifdef CONFIG_ARM_AMBA
- else if (dev->bus == &amba_bustype)
+ else if (dev->bus == &amba_bustype) {
amba_device_unregister(to_amba_device(dev));
+ of_dma_deconfigure(dev);
+ }
#endif
- of_dma_deconfigure(dev);
of_node_clear_flag(dev->of_node, OF_POPULATED);
of_node_clear_flag(dev->of_node, OF_POPULATED_BUS);
return 0;
diff --git a/drivers/pci/probe.c b/drivers/pci/probe.c
index 93f280df34282..cb2481c7df1ac 100644
--- a/drivers/pci/probe.c
+++ b/drivers/pci/probe.c
@@ -1726,7 +1726,8 @@ static void pci_dma_configure(struct pci_dev *dev)
if (IS_ENABLED(CONFIG_OF) &&
bridge->parent && bridge->parent->of_node) {
- of_dma_configure(&dev->dev, bridge->parent->of_node);
+ of_dma_configure_masks(&dev->dev, bridge->parent->of_node);
+ of_dma_configure_ops(&dev->dev, bridge->parent->of_node);
} else if (has_acpi_companion(bridge)) {
struct acpi_device *adev = to_acpi_device_node(bridge->fwnode);
enum dev_dma_attr attr = acpi_get_dma_attr(adev);
diff --git a/include/linux/iommu.h b/include/linux/iommu.h
index a35fb8b42e1a8..9912288992279 100644
--- a/include/linux/iommu.h
+++ b/include/linux/iommu.h
@@ -272,6 +272,7 @@ struct device *iommu_device_create(struct device *parent, void *drvdata,
void iommu_device_destroy(struct device *dev);
int iommu_device_link(struct device *dev, struct device *link);
void iommu_device_unlink(struct device *dev, struct device *link);
+int iommu_bus_add_dev(struct device *dev);
/* Window handling function prototypes */
extern int iommu_domain_window_enable(struct iommu_domain *domain, u32 wnd_nr,
diff --git a/include/linux/msm_iommu_domains.h b/include/linux/msm_iommu_domains.h
new file mode 100644
index 0000000000000..811d7737ddb5d
--- /dev/null
+++ b/include/linux/msm_iommu_domains.h
@@ -0,0 +1,239 @@
+/*
+ * Copyright (c) 2011-2014, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef _LINUX_MSM_IOMMU_DOMAINS_H
+#define _LINUX_MSM_IOMMU_DOMAINS_H
+
+#include <linux/errno.h>
+#include <linux/mutex.h>
+#include <linux/genalloc.h>
+#include <linux/rbtree.h>
+#include <linux/dma-buf.h>
+
+#define MSM_IOMMU_DOMAIN_SECURE 0x1
+
+enum {
+ VIDEO_DOMAIN,
+ CAMERA_DOMAIN,
+ DISPLAY_READ_DOMAIN,
+ DISPLAY_WRITE_DOMAIN,
+ ROTATOR_SRC_DOMAIN,
+ ROTATOR_DST_DOMAIN,
+ MAX_DOMAINS
+};
+
+enum {
+ VIDEO_FIRMWARE_POOL,
+ VIDEO_MAIN_POOL,
+ GEN_POOL,
+};
+
+struct mem_pool {
+ struct mutex pool_mutex;
+ struct gen_pool *gpool;
+ phys_addr_t paddr;
+ unsigned long size;
+ unsigned long free;
+ unsigned int id;
+};
+
+struct msm_iommu_domain {
+ /* iommu domain to map in */
+ struct iommu_domain *domain;
+ /* total number of allocations from this domain */
+ atomic_t allocation_cnt;
+ /* number of iova pools */
+ int npools;
+ /*
+ * array of gen_pools for allocating iovas.
+ * behavior is undefined if these overlap
+ */
+ struct mem_pool *iova_pools;
+};
+
+struct msm_iommu_domain_name {
+ char *name;
+ int domain;
+};
+
+struct iommu_domains_pdata {
+ struct msm_iommu_domain *domains;
+ int ndomains;
+ struct msm_iommu_domain_name *domain_names;
+ int nnames;
+ unsigned int domain_alloc_flags;
+};
+
+struct msm_iova_partition {
+ unsigned long start;
+ unsigned long size;
+};
+
+struct msm_iova_layout {
+ struct msm_iova_partition *partitions;
+ int npartitions;
+ const char *client_name;
+ unsigned int domain_flags;
+ unsigned int is_secure;
+};
+
+#if defined(CONFIG_QCOM_IOMMU)
+extern void msm_iommu_set_client_name(struct iommu_domain *domain,
+ char const *name);
+extern struct iommu_domain *msm_get_iommu_domain(int domain_num);
+extern int msm_allocate_iova_address(unsigned int iommu_domain,
+ unsigned int partition_no,
+ unsigned long size,
+ unsigned long align,
+ unsigned long *iova);
+
+extern void msm_free_iova_address(unsigned long iova,
+ unsigned int iommu_domain,
+ unsigned int partition_no,
+ unsigned long size);
+
+extern int msm_use_iommu(void);
+
+extern int msm_iommu_map_extra(struct iommu_domain *domain,
+ unsigned long start_iova,
+ phys_addr_t phys_addr,
+ unsigned long size,
+ unsigned long page_size,
+ int cached);
+
+extern void msm_iommu_unmap_extra(struct iommu_domain *domain,
+ unsigned long start_iova,
+ unsigned long size,
+ unsigned long page_size);
+
+extern int msm_iommu_map_contig_buffer(phys_addr_t phys,
+ unsigned int domain_no,
+ unsigned int partition_no,
+ unsigned long size,
+ unsigned long align,
+ unsigned long cached,
+ dma_addr_t *iova_val);
+
+extern void msm_iommu_unmap_contig_buffer(dma_addr_t iova,
+ unsigned int domain_no,
+ unsigned int partition_no,
+ unsigned long size);
+
+extern int msm_register_domain(struct msm_iova_layout *layout);
+extern int msm_unregister_domain(struct iommu_domain *domain);
+
+int msm_map_dma_buf(struct dma_buf *dma_buf, struct sg_table *table,
+ int domain_num, int partition_num, unsigned long align,
+ unsigned long iova_length, unsigned long *iova,
+ unsigned long *buffer_size,
+ unsigned long flags, unsigned long iommu_flags);
+
+void msm_unmap_dma_buf(struct sg_table *table, int domain_num,
+ int partition_num);
+#else
+static inline void msm_iommu_set_client_name(struct iommu_domain *domain,
+ char const *name)
+{
+}
+
+static inline struct iommu_domain *msm_get_iommu_domain(int subsys_id)
+{
+ return NULL;
+}
+
+static inline int msm_allocate_iova_address(unsigned int iommu_domain,
+ unsigned int partition_no,
+ unsigned long size,
+ unsigned long align,
+ unsigned long *iova)
+{
+ return -ENOMEM;
+}
+
+static inline void msm_free_iova_address(unsigned long iova,
+ unsigned int iommu_domain,
+ unsigned int partition_no,
+ unsigned long size) { }
+
+static inline int msm_use_iommu(void)
+{
+ return 0;
+}
+
+static inline int msm_iommu_map_extra(struct iommu_domain *domain,
+ unsigned long start_iova,
+ phys_addr_t phys_addr,
+ unsigned long size,
+ unsigned long page_size,
+ int cached)
+{
+ return -ENODEV;
+}
+
+static inline void msm_iommu_unmap_extra(struct iommu_domain *domain,
+ unsigned long start_iova,
+ unsigned long size,
+ unsigned long page_size)
+{
+}
+
+static inline int msm_iommu_map_contig_buffer(phys_addr_t phys,
+ unsigned int domain_no,
+ unsigned int partition_no,
+ unsigned long size,
+ unsigned long align,
+ unsigned long cached,
+ dma_addr_t *iova_val)
+{
+ *iova_val = phys;
+ return 0;
+}
+
+static inline void msm_iommu_unmap_contig_buffer(dma_addr_t iova,
+ unsigned int domain_no,
+ unsigned int partition_no,
+ unsigned long size)
+{
+}
+
+static inline int msm_register_domain(struct msm_iova_layout *layout)
+{
+ return -ENODEV;
+}
+
+static inline int msm_unregister_domain(struct iommu_domain *domain)
+{
+ return -ENODEV;
+}
+
+static inline int msm_map_dma_buf(struct dma_buf *dma_buf,
+ struct sg_table *table,
+ int domain_num, int partition_num,
+ unsigned long align,
+ unsigned long iova_length,
+ unsigned long *iova,
+ unsigned long *buffer_size,
+ unsigned long flags,
+ unsigned long iommu_flags)
+{
+ return -ENODEV;
+}
+
+static inline void msm_unmap_dma_buf(struct sg_table *table, int domain_num,
+ int partition_num)
+{
+
+}
+
+#endif /* CONFIG_QCOM_IOMMU */
+#endif /* _LINUX_MSM_IOMMU_DOMAINS_H */ \ No newline at end of file
diff --git a/include/linux/of_device.h b/include/linux/of_device.h
index cc7dd687a89dd..4b8cb7da6b4a2 100644
--- a/include/linux/of_device.h
+++ b/include/linux/of_device.h
@@ -55,7 +55,9 @@ static inline struct device_node *of_cpu_device_node_get(int cpu)
return of_node_get(cpu_dev->of_node);
}
-void of_dma_configure(struct device *dev, struct device_node *np);
+void of_dma_configure_masks(struct device *dev, struct device_node *np);
+int of_dma_configure_ops(struct device *dev, struct device_node *np);
+void of_dma_deconfigure(struct device *dev);
#else /* CONFIG_OF */
static inline int of_driver_match_device(struct device *dev,
@@ -98,7 +100,15 @@ static inline struct device_node *of_cpu_device_node_get(int cpu)
{
return NULL;
}
-static inline void of_dma_configure(struct device *dev, struct device_node *np)
+static inline void of_dma_configure_masks(struct device *dev,
+ struct device_node *np)
+{}
+static inline int of_dma_configure_ops(struct device *dev,
+ struct device_node *np)
+{
+ return 0;
+}
+static inline void of_dma_deconfigure(struct device *dev)
{}
#endif /* CONFIG_OF */
diff --git a/include/linux/of_pci.h b/include/linux/of_pci.h
index b969e94439622..4224e7bb7c160 100644
--- a/include/linux/of_pci.h
+++ b/include/linux/of_pci.h
@@ -17,6 +17,8 @@ int of_irq_parse_and_map_pci(const struct pci_dev *dev, u8 slot, u8 pin);
int of_pci_parse_bus_range(struct device_node *node, struct resource *res);
int of_get_pci_domain_nr(struct device_node *node);
void of_pci_check_probe_only(void);
+int of_pci_map_rid(struct device_node *np, const char *map_name, u32 rid_in,
+ struct device_node **target, u32 *rid_out);
#else
static inline int of_irq_parse_pci(const struct pci_dev *pdev, struct of_phandle_args *out_irq)
{
@@ -52,6 +54,12 @@ of_get_pci_domain_nr(struct device_node *node)
return -1;
}
+static inline int of_pci_map_rid(struct device_node *np, const char *map_name,
+ u32 rid_in, struct device_node **target, u32 *rid_out)
+{
+ return -EINVAL;
+}
+
static inline void of_pci_check_probe_only(void) { }
#endif
diff --git a/include/linux/qcom_iommu.h b/include/linux/qcom_iommu.h
new file mode 100644
index 0000000000000..54f47b378ed66
--- /dev/null
+++ b/include/linux/qcom_iommu.h
@@ -0,0 +1,388 @@
+/* Copyright (c) 2010-2015, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef MSM_IOMMU_H
+#define MSM_IOMMU_H
+
+#include <linux/interrupt.h>
+#include <linux/clk.h>
+#include <linux/list.h>
+#include <linux/regulator/consumer.h>
+#include <soc/qcom/socinfo.h>
+
+/* Private pgprot flag */
+#define IOMMU_PRIV 16
+
+extern pgprot_t pgprot_kernel;
+extern struct bus_type msm_iommu_sec_bus_type;
+extern struct iommu_access_ops iommu_access_ops_v0;
+extern struct iommu_access_ops iommu_access_ops_v1;
+
+/* Domain attributes */
+#define MSM_IOMMU_DOMAIN_PT_CACHEABLE 0x1
+#define MSM_IOMMU_DOMAIN_PT_SECURE 0x2
+
+/* Mask for the cache policy attribute */
+#define MSM_IOMMU_CP_MASK 0x03
+
+#define DOMAIN_ATTR_QCOM_COHERENT_HTW_DISABLE DOMAIN_ATTR_MAX
+
+/* Maximum number of Machine IDs that we are allowing to be mapped to the same
+ * context bank. The number of MIDs mapped to the same CB does not affect
+ * performance, but there is a practical limit on how many distinct MIDs may
+ * be present. These mappings are typically determined at design time and are
+ * not expected to change at run time.
+ */
+#define MAX_NUM_MIDS 32
+
+/* Maximum number of SMT entries allowed by the system */
+#define MAX_NUM_SMR 128
+
+#define MAX_NUM_BFB_REGS 32
+
+/**
+ * struct msm_iommu_dev - a single IOMMU hardware instance
+ * @name: Human-readable name given to this IOMMU HW instance
+ * @ncb: Number of context banks present on this IOMMU HW instance
+ */
+struct msm_iommu_dev {
+ const char *name;
+ int ncb;
+ int ttbr_split;
+};
+
+/**
+ * struct msm_iommu_ctx_dev - an IOMMU context bank instance
+ * @name: Human-readable name given to this context bank
+ * @num: Index of this context bank within the hardware
+ * @mids: List of Machine IDs that are to be mapped into this context
+ * bank, terminated by -1. The MID is a set of signals on the
+ * AXI bus that identifies the function associated with a specific
+ * memory request. (See ARM spec).
+ */
+struct msm_iommu_ctx_dev {
+ const char *name;
+ int num;
+ int mids[MAX_NUM_MIDS];
+};
+
+/**
+ * struct msm_iommu_bfb_settings - a set of IOMMU BFB tuning parameters
+ * @regs: An array of register offsets to configure
+ * @data: Values to write to corresponding registers
+ * @length: Number of valid entries in the offset/val arrays
+ */
+struct msm_iommu_bfb_settings {
+ unsigned int regs[MAX_NUM_BFB_REGS];
+ unsigned int data[MAX_NUM_BFB_REGS];
+ int length;
+};
+
+/**
+ * struct msm_iommu_drvdata - A single IOMMU hardware instance
+ * @base: IOMMU config port base address (VA)
+ * @glb_base: IOMMU config port base address for global register space (VA)
+ * @phys_base: IOMMU physical base address.
+ * @ncb: The number of contexts on this IOMMU
+ * @irq: Interrupt number
+ * @core: The bus clock for this IOMMU hardware instance
+ * @iface: The clock for the IOMMU bus interconnect
+ * @name: Human-readable name of this IOMMU device
+ * @bfb_settings: Optional BFB performance tuning parameters
+ * @dev: Struct device this hardware instance is tied to
+ * @list: List head to link all iommus together
+ * @halt_enabled: Set to 1 if IOMMU halt is supported in the IOMMU, 0 otherwise.
+ * @ctx_attach_count: Count of how many contexts are attached.
+ * @bus_client: Bus client needed to vote for bus bandwidth.
+ * @needs_rem_spinlock: 1 if remote spinlock is needed, 0 otherwise
+ * @powered_on: Powered status of the IOMMU. 0 means powered off.
+ *
+ * A msm_iommu_drvdata holds the global driver data about a single piece
+ * of an IOMMU hardware instance.
+ */
+struct msm_iommu_drvdata {
+ void __iomem *base;
+ phys_addr_t phys_base;
+ void __iomem *glb_base;
+ void __iomem *cb_base;
+ void __iomem *smmu_local_base;
+ int ncb;
+ int ttbr_split;
+ struct clk *core;
+ struct clk *iface;
+ const char *name;
+ struct msm_iommu_bfb_settings *bfb_settings;
+ int sec_id;
+ struct device *dev;
+ struct list_head list;
+ int halt_enabled;
+ unsigned int ctx_attach_count;
+ unsigned int bus_client;
+ int needs_rem_spinlock;
+ int powered_on;
+ unsigned int model;
+};
+
+/**
+ * struct iommu_access_ops - Callbacks for accessing IOMMU
+ * @iommu_power_on: Turn on power to unit
+ * @iommu_power_off: Turn off power to unit
+ * @iommu_bus_vote: Vote for bus bandwidth
+ * @iommu_clk_on: Turn on clks to unit
+ * @iommu_clk_off: Turn off clks to unit
+ * @iommu_lock_initialize: Initialize the remote lock
+ * @iommu_lock_acquire: Acquire any locks needed
+ * @iommu_lock_release: Release locks needed
+ */
+struct iommu_access_ops {
+ int (*iommu_power_on)(struct msm_iommu_drvdata *);
+ void (*iommu_power_off)(struct msm_iommu_drvdata *);
+ int (*iommu_bus_vote)(struct msm_iommu_drvdata *drvdata,
+ unsigned int vote);
+ int (*iommu_clk_on)(struct msm_iommu_drvdata *);
+ void (*iommu_clk_off)(struct msm_iommu_drvdata *);
+ void * (*iommu_lock_initialize)(void);
+ void (*iommu_lock_acquire)(unsigned int need_extra_lock);
+ void (*iommu_lock_release)(unsigned int need_extra_lock);
+};
+
+void msm_iommu_add_drv(struct msm_iommu_drvdata *drv);
+void msm_iommu_remove_drv(struct msm_iommu_drvdata *drv);
+void program_iommu_bfb_settings(void __iomem *base,
+ const struct msm_iommu_bfb_settings *bfb_settings);
+void iommu_halt(const struct msm_iommu_drvdata *iommu_drvdata);
+void iommu_resume(const struct msm_iommu_drvdata *iommu_drvdata);
+
+/**
+ * struct msm_iommu_ctx_drvdata - an IOMMU context bank instance
+ * @num: Hardware context number of this context
+ * @pdev: Platform device associated with this HW instance
+ * @attached_elm: List element for domains to track which devices are
+ * attached to them
+ * @attached_domain: Domain currently attached to this context (if any)
+ * @name: Human-readable name of this context device
+ * @sids: List of Stream IDs mapped to this context
+ * @nsid: Number of Stream IDs mapped to this context
+ * @secure_context: true if this is a secure context programmed by
+ *			the secure environment, false otherwise
+ * @asid: ASID used with this context.
+ * @attach_count: Number of times this context has been attached.
+ *
+ * A msm_iommu_ctx_drvdata holds the driver data for a single context bank
+ * within each IOMMU hardware instance
+ */
+struct msm_iommu_ctx_drvdata {
+ int num;
+ struct platform_device *pdev;
+ struct list_head attached_elm;
+ struct iommu_domain *attached_domain;
+ const char *name;
+ u32 sids[MAX_NUM_SMR];
+ unsigned int nsid;
+ unsigned int secure_context;
+ int asid;
+ int attach_count;
+ u32 sid_mask[MAX_NUM_SMR];
+ unsigned int n_sid_mask;
+};
+
+enum dump_reg {
+ DUMP_REG_FIRST,
+ DUMP_REG_FAR0 = DUMP_REG_FIRST,
+ DUMP_REG_FAR1,
+ DUMP_REG_PAR0,
+ DUMP_REG_PAR1,
+ DUMP_REG_FSR,
+ DUMP_REG_FSYNR0,
+ DUMP_REG_FSYNR1,
+ DUMP_REG_TTBR0_0,
+ DUMP_REG_TTBR0_1,
+ DUMP_REG_TTBR1_0,
+ DUMP_REG_TTBR1_1,
+ DUMP_REG_SCTLR,
+ DUMP_REG_ACTLR,
+ DUMP_REG_PRRR,
+ DUMP_REG_MAIR0 = DUMP_REG_PRRR,
+ DUMP_REG_NMRR,
+ DUMP_REG_MAIR1 = DUMP_REG_NMRR,
+ DUMP_REG_CBAR_N,
+ DUMP_REG_CBFRSYNRA_N,
+ MAX_DUMP_REGS,
+};
+
+enum dump_reg_type {
+ DRT_CTX_REG,
+ DRT_GLOBAL_REG,
+ DRT_GLOBAL_REG_N,
+};
+
+enum model_id {
+ QSMMUv1 = 1,
+ QSMMUv2,
+ MMU_500 = 500,
+ MAX_MODEL,
+};
+
+struct dump_regs_tbl_entry {
+ /*
+ * To keep things context-bank-agnostic, we only store the
+ * register offset in `reg_offset'
+ */
+ unsigned int reg_offset;
+ const char *name;
+ int must_be_present;
+ enum dump_reg_type dump_reg_type;
+};
+extern struct dump_regs_tbl_entry dump_regs_tbl[MAX_DUMP_REGS];
+
+#define COMBINE_DUMP_REG(upper, lower) (((u64) upper << 32) | lower)
+
+struct msm_iommu_context_reg {
+ uint32_t val;
+ bool valid;
+};
+
+void print_ctx_regs(struct msm_iommu_context_reg regs[]);
+
+/*
+ * Interrupt handler for the IOMMU context fault interrupt. Hooking the
+ * interrupt is not supported in the API yet, but this will print an error
+ * message and dump useful IOMMU registers.
+ */
+irqreturn_t msm_iommu_global_fault_handler(int irq, void *dev_id);
+irqreturn_t msm_iommu_fault_handler(int irq, void *dev_id);
+irqreturn_t msm_iommu_fault_handler_v2(int irq, void *dev_id);
+irqreturn_t msm_iommu_secure_fault_handler_v2(int irq, void *dev_id);
+
+enum {
+ PROC_APPS,
+ PROC_GPU,
+ PROC_MAX
+};
+
+/* Expose structure to allow kgsl iommu driver to use the same structure to
+ * communicate to GPU the addresses of the flag and turn variables.
+ */
+struct remote_iommu_petersons_spinlock {
+ uint32_t flag[PROC_MAX];
+ uint32_t turn;
+};
+
+#ifdef CONFIG_QCOM_IOMMU_V1
+void *msm_iommu_lock_initialize(void);
+void msm_iommu_mutex_lock(void);
+void msm_iommu_mutex_unlock(void);
+void msm_set_iommu_access_ops(struct iommu_access_ops *ops);
+struct iommu_access_ops *msm_get_iommu_access_ops(void);
+#else
+static inline void *msm_iommu_lock_initialize(void)
+{
+ return NULL;
+}
+static inline void msm_iommu_mutex_lock(void) { }
+static inline void msm_iommu_mutex_unlock(void) { }
+static inline void msm_set_iommu_access_ops(struct iommu_access_ops *ops)
+{
+
+}
+static inline struct iommu_access_ops *msm_get_iommu_access_ops(void)
+{
+ return NULL;
+}
+#endif
+
+#ifdef CONFIG_QCOM_IOMMU_V1
+/*
+ * Look up an IOMMU context device by its context name. NULL if none found.
+ * Useful for testing and drivers that do not yet fully have IOMMU stuff in
+ * their platform devices.
+ */
+struct device *msm_iommu_get_ctx(const char *ctx_name);
+#else
+static inline struct device *msm_iommu_get_ctx(const char *ctx_name)
+{
+ return NULL;
+}
+#endif
+
+/*
+ * Function to program the global registers of an IOMMU securely.
+ * This should only be called on IOMMUs for which kernel programming
+ * of global registers is not possible
+ */
+void msm_iommu_sec_set_access_ops(struct iommu_access_ops *access_ops);
+int msm_iommu_sec_program_iommu(struct msm_iommu_drvdata *drvdata,
+ struct msm_iommu_ctx_drvdata *ctx_drvdata);
+int is_vfe_secure(void);
+
+#ifdef CONFIG_MSM_IOMMU_V0
+static inline int msm_soc_version_supports_iommu_v0(void)
+{
+ static int soc_supports_v0 = -1;
+#ifdef CONFIG_OF
+ struct device_node *node;
+#endif
+
+ if (soc_supports_v0 != -1)
+ return soc_supports_v0;
+
+#ifdef CONFIG_OF
+ node = of_find_compatible_node(NULL, NULL, "qcom,msm-smmu-v0");
+ if (node) {
+ soc_supports_v0 = 1;
+ of_node_put(node);
+ return 1;
+ }
+#endif
+ if (cpu_is_msm8960() &&
+ SOCINFO_VERSION_MAJOR(socinfo_get_version()) < 2) {
+ soc_supports_v0 = 0;
+ return 0;
+ }
+
+ if (cpu_is_msm8x60() &&
+ (SOCINFO_VERSION_MAJOR(socinfo_get_version()) != 2 ||
+ SOCINFO_VERSION_MINOR(socinfo_get_version()) < 1)) {
+ soc_supports_v0 = 0;
+ return 0;
+ }
+
+ soc_supports_v0 = 1;
+ return 1;
+}
+#else
+static inline int msm_soc_version_supports_iommu_v0(void)
+{
+ return 0;
+}
+#endif
+
+int msm_iommu_get_scm_call_avail(void);
+void msm_iommu_check_scm_call_avail(void);
+
+u32 msm_iommu_get_mair0(void);
+u32 msm_iommu_get_mair1(void);
+u32 msm_iommu_get_prrr(void);
+u32 msm_iommu_get_nmrr(void);
+
+/* events for notifiers passed to msm_iommu_register_notify */
+#define TLB_SYNC_TIMEOUT 1
+
+#ifdef CONFIG_QCOM_IOMMU_V1
+void msm_iommu_register_notify(struct notifier_block *nb);
+#else
+static inline void msm_iommu_register_notify(struct notifier_block *nb)
+{
+}
+#endif
+
+#endif
diff --git a/include/soc/qcom/socinfo.h b/include/soc/qcom/socinfo.h
new file mode 100644
index 0000000000000..29715506ed463
--- /dev/null
+++ b/include/soc/qcom/socinfo.h
@@ -0,0 +1,610 @@
+/* Copyright (c) 2009-2014, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#ifndef _ARCH_ARM_MACH_MSM_SOCINFO_H_
+#define _ARCH_ARM_MACH_MSM_SOCINFO_H_
+
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/errno.h>
+#include <linux/of_fdt.h>
+#include <linux/of.h>
+
+#include <asm/cputype.h>
+/*
+ * SOC version type with major number in the upper 16 bits and minor
+ * number in the lower 16 bits. For example:
+ * 1.0 -> 0x00010000
+ * 2.3 -> 0x00020003
+ */
+#define SOCINFO_VERSION_MAJOR(ver) ((ver & 0xffff0000) >> 16)
+#define SOCINFO_VERSION_MINOR(ver) (ver & 0x0000ffff)
+
+#ifdef CONFIG_OF
+#define of_board_is_cdp() of_machine_is_compatible("qcom,cdp")
+#define of_board_is_sim() of_machine_is_compatible("qcom,sim")
+#define of_board_is_rumi() of_machine_is_compatible("qcom,rumi")
+#define of_board_is_fluid() of_machine_is_compatible("qcom,fluid")
+#define of_board_is_liquid() of_machine_is_compatible("qcom,liquid")
+#define of_board_is_dragonboard() \
+ of_machine_is_compatible("qcom,dragonboard")
+#define of_board_is_cdp() of_machine_is_compatible("qcom,cdp")
+#define of_board_is_mtp() of_machine_is_compatible("qcom,mtp")
+#define of_board_is_qrd() of_machine_is_compatible("qcom,qrd")
+#define of_board_is_xpm() of_machine_is_compatible("qcom,xpm")
+#define of_board_is_skuf() of_machine_is_compatible("qcom,skuf")
+#define of_board_is_sbc() of_machine_is_compatible("qcom,sbc")
+
+#define machine_is_msm8974() of_machine_is_compatible("qcom,msm8974")
+#define machine_is_msm9625() of_machine_is_compatible("qcom,msm9625")
+#define machine_is_msm8610() of_machine_is_compatible("qcom,msm8610")
+#define machine_is_msm8226() of_machine_is_compatible("qcom,msm8226")
+#define machine_is_apq8074() of_machine_is_compatible("qcom,apq8074")
+#define machine_is_msm8926() of_machine_is_compatible("qcom,msm8926")
+
+#define early_machine_is_msm8610() \
+ of_flat_dt_is_compatible(of_get_flat_dt_root(), "qcom,msm8610")
+#define early_machine_is_msmferrum() \
+ of_flat_dt_is_compatible(of_get_flat_dt_root(), "qcom,msmferrum")
+#define early_machine_is_msm8916() \
+ of_flat_dt_is_compatible(of_get_flat_dt_root(), "qcom,msm8916")
+#define early_machine_is_msm8936() \
+ of_flat_dt_is_compatible(of_get_flat_dt_root(), "qcom,msm8936")
+#define early_machine_is_msm8939() \
+ of_flat_dt_is_compatible(of_get_flat_dt_root(), "qcom,msm8939")
+#define early_machine_is_apq8084() \
+ of_flat_dt_is_compatible(of_get_flat_dt_root(), "qcom,apq8084")
+#define early_machine_is_mdm9630() \
+ of_flat_dt_is_compatible(of_get_flat_dt_root(), "qcom,mdm9630")
+#define early_machine_is_msmzirc() \
+ of_flat_dt_is_compatible(of_get_flat_dt_root(), "qcom,msmzirc")
+#define early_machine_is_fsm9900() \
+ of_flat_dt_is_compatible(of_get_flat_dt_root(), "qcom,fsm9900")
+#define early_machine_is_msm8994() \
+ of_flat_dt_is_compatible(of_get_flat_dt_root(), "qcom,msm8994")
+#define early_machine_is_msm8992() \
+ of_flat_dt_is_compatible(of_get_flat_dt_root(), "qcom,msm8992")
+#define early_machine_is_fsm9010() \
+ of_flat_dt_is_compatible(of_get_flat_dt_root(), "qcom,fsm9010")
+#else
+#define of_board_is_sim() 0
+#define of_board_is_rumi() 0
+#define of_board_is_fluid() 0
+#define of_board_is_liquid() 0
+#define of_board_is_dragonboard() 0
+#define of_board_is_cdp() 0
+#define of_board_is_mtp() 0
+#define of_board_is_qrd() 0
+#define of_board_is_xpm() 0
+#define of_board_is_skuf() 0
+#define of_board_is_sbc() 0
+
+#define machine_is_msm8974() 0
+#define machine_is_msm9625() 0
+#define machine_is_msm8610() 0
+#define machine_is_msm8226() 0
+#define machine_is_apq8074() 0
+#define machine_is_msm8926() 0
+
+#define early_machine_is_msm8610() 0
+#define early_machine_is_msmferrum() 0
+#define early_machine_is_msm8916() 0
+#define early_machine_is_msm8936() 0
+#define early_machine_is_msm8939() 0
+#define early_machine_is_apq8084() 0
+#define early_machine_is_mdm9630() 0
+#define early_machine_is_fsm9900() 0
+#define early_machine_is_fsm9010() 0
+#endif
+
+#define PLATFORM_SUBTYPE_MDM 1
+#define PLATFORM_SUBTYPE_INTERPOSERV3 2
+#define PLATFORM_SUBTYPE_SGLTE 6
+
+enum msm_cpu {
+ MSM_CPU_UNKNOWN = 0,
+ MSM_CPU_7X01,
+ MSM_CPU_7X25,
+ MSM_CPU_7X27,
+ MSM_CPU_8X50,
+ MSM_CPU_8X50A,
+ MSM_CPU_7X30,
+ MSM_CPU_8X55,
+ MSM_CPU_8X60,
+ MSM_CPU_8960,
+ MSM_CPU_8960AB,
+ MSM_CPU_7X27A,
+ FSM_CPU_9XXX,
+ MSM_CPU_7X25A,
+ MSM_CPU_7X25AA,
+ MSM_CPU_7X25AB,
+ MSM_CPU_8064,
+ MSM_CPU_8064AB,
+ MSM_CPU_8064AA,
+ MSM_CPU_8930,
+ MSM_CPU_8930AA,
+ MSM_CPU_8930AB,
+ MSM_CPU_7X27AA,
+ MSM_CPU_9615,
+ MSM_CPU_8974,
+ MSM_CPU_8974PRO_AA,
+ MSM_CPU_8974PRO_AB,
+ MSM_CPU_8974PRO_AC,
+ MSM_CPU_8627,
+ MSM_CPU_8625,
+ MSM_CPU_9625,
+ MSM_CPU_FERRUM,
+ MSM_CPU_8916,
+ MSM_CPU_8936,
+ MSM_CPU_8939,
+ MSM_CPU_8226,
+ MSM_CPU_8610,
+ MSM_CPU_8625Q,
+ MSM_CPU_8084,
+ MSM_CPU_9630,
+ FSM_CPU_9900,
+ MSM_CPU_ZIRC,
+ MSM_CPU_8994,
+ MSM_CPU_8992,
+ FSM_CPU_9010,
+};
+
+struct msm_soc_info {
+ enum msm_cpu generic_soc_type;
+ char *soc_id_string;
+};
+
+enum pmic_model {
+ PMIC_MODEL_PM8058 = 13,
+ PMIC_MODEL_PM8028 = 14,
+ PMIC_MODEL_PM8901 = 15,
+ PMIC_MODEL_PM8027 = 16,
+ PMIC_MODEL_ISL_9519 = 17,
+ PMIC_MODEL_PM8921 = 18,
+ PMIC_MODEL_PM8018 = 19,
+ PMIC_MODEL_PM8015 = 20,
+ PMIC_MODEL_PM8014 = 21,
+ PMIC_MODEL_PM8821 = 22,
+ PMIC_MODEL_PM8038 = 23,
+ PMIC_MODEL_PM8922 = 24,
+ PMIC_MODEL_PM8917 = 25,
+ PMIC_MODEL_UNKNOWN = 0xFFFFFFFF
+};
+
+enum msm_cpu socinfo_get_msm_cpu(void);
+uint32_t socinfo_get_id(void);
+uint32_t socinfo_get_version(void);
+uint32_t socinfo_get_raw_id(void);
+char *socinfo_get_build_id(void);
+uint32_t socinfo_get_platform_type(void);
+uint32_t socinfo_get_platform_subtype(void);
+uint32_t socinfo_get_platform_version(void);
+enum pmic_model socinfo_get_pmic_model(void);
+uint32_t socinfo_get_pmic_die_revision(void);
+int __init socinfo_init(void) __must_check;
+const int read_msm_cpu_type(void);
+const int get_core_count(void);
+const int cpu_is_krait(void);
+const int cpu_is_krait_v1(void);
+const int cpu_is_krait_v2(void);
+const int cpu_is_krait_v3(void);
+
+static inline int cpu_is_msm7x01(void)
+{
+#ifdef CONFIG_ARCH_MSM7X01A
+ enum msm_cpu cpu = socinfo_get_msm_cpu();
+
+ BUG_ON(cpu == MSM_CPU_UNKNOWN);
+ return cpu == MSM_CPU_7X01;
+#else
+ return 0;
+#endif
+}
+
+static inline int cpu_is_msm7x25(void)
+{
+#ifdef CONFIG_ARCH_MSM7X25
+ enum msm_cpu cpu = socinfo_get_msm_cpu();
+
+ BUG_ON(cpu == MSM_CPU_UNKNOWN);
+ return cpu == MSM_CPU_7X25;
+#else
+ return 0;
+#endif
+}
+
+static inline int cpu_is_msm7x27(void)
+{
+#if defined(CONFIG_ARCH_MSM7X27) && !defined(CONFIG_ARCH_MSM7X27A)
+ enum msm_cpu cpu = socinfo_get_msm_cpu();
+
+ BUG_ON(cpu == MSM_CPU_UNKNOWN);
+ return cpu == MSM_CPU_7X27;
+#else
+ return 0;
+#endif
+}
+
+static inline int cpu_is_msm7x27a(void)
+{
+#ifdef CONFIG_ARCH_MSM7X27A
+ enum msm_cpu cpu = socinfo_get_msm_cpu();
+
+ BUG_ON(cpu == MSM_CPU_UNKNOWN);
+ return cpu == MSM_CPU_7X27A;
+#else
+ return 0;
+#endif
+}
+
+static inline int cpu_is_msm7x27aa(void)
+{
+#ifdef CONFIG_ARCH_MSM7X27A
+ enum msm_cpu cpu = socinfo_get_msm_cpu();
+
+ BUG_ON(cpu == MSM_CPU_UNKNOWN);
+ return cpu == MSM_CPU_7X27AA;
+#else
+ return 0;
+#endif
+}
+
+static inline int cpu_is_msm7x25a(void)
+{
+#ifdef CONFIG_ARCH_MSM7X27A
+ enum msm_cpu cpu = socinfo_get_msm_cpu();
+
+ BUG_ON(cpu == MSM_CPU_UNKNOWN);
+ return cpu == MSM_CPU_7X25A;
+#else
+ return 0;
+#endif
+}
+
+static inline int cpu_is_msm7x25aa(void)
+{
+#ifdef CONFIG_ARCH_MSM7X27A
+ enum msm_cpu cpu = socinfo_get_msm_cpu();
+
+ BUG_ON(cpu == MSM_CPU_UNKNOWN);
+ return cpu == MSM_CPU_7X25AA;
+#else
+ return 0;
+#endif
+}
+
+static inline int cpu_is_msm7x25ab(void)
+{
+#ifdef CONFIG_ARCH_MSM7X27A
+ enum msm_cpu cpu = socinfo_get_msm_cpu();
+
+ BUG_ON(cpu == MSM_CPU_UNKNOWN);
+ return cpu == MSM_CPU_7X25AB;
+#else
+ return 0;
+#endif
+}
+
+static inline int cpu_is_msm7x30(void)
+{
+#ifdef CONFIG_ARCH_MSM7X30
+ enum msm_cpu cpu = socinfo_get_msm_cpu();
+
+ BUG_ON(cpu == MSM_CPU_UNKNOWN);
+ return cpu == MSM_CPU_7X30;
+#else
+ return 0;
+#endif
+}
+
+static inline int cpu_is_qsd8x50(void)
+{
+#ifdef CONFIG_ARCH_QSD8X50
+ enum msm_cpu cpu = socinfo_get_msm_cpu();
+
+ BUG_ON(cpu == MSM_CPU_UNKNOWN);
+ return cpu == MSM_CPU_8X50;
+#else
+ return 0;
+#endif
+}
+
+static inline int cpu_is_msm8x55(void)
+{
+#ifdef CONFIG_ARCH_MSM7X30
+ enum msm_cpu cpu = socinfo_get_msm_cpu();
+
+ BUG_ON(cpu == MSM_CPU_UNKNOWN);
+ return cpu == MSM_CPU_8X55;
+#else
+ return 0;
+#endif
+}
+
+static inline int cpu_is_msm8x60(void)
+{
+#ifdef CONFIG_ARCH_MSM8X60
+ return read_msm_cpu_type() == MSM_CPU_8X60;
+#else
+ return 0;
+#endif
+}
+
+static inline int cpu_is_msm8960(void)
+{
+ return 0;
+}
+
+static inline int cpu_is_msm8960ab(void)
+{
+ return 0;
+}
+
+static inline int cpu_is_apq8064(void)
+{
+#ifdef CONFIG_ARCH_APQ8064
+ return read_msm_cpu_type() == MSM_CPU_8064;
+#else
+ return 0;
+#endif
+}
+
+static inline int cpu_is_apq8064ab(void)
+{
+#ifdef CONFIG_ARCH_APQ8064
+ return read_msm_cpu_type() == MSM_CPU_8064AB;
+#else
+ return 0;
+#endif
+}
+
+static inline int cpu_is_apq8064aa(void)
+{
+#ifdef CONFIG_ARCH_APQ8064
+ return read_msm_cpu_type() == MSM_CPU_8064AA;
+#else
+ return 0;
+#endif
+}
+
+static inline int cpu_is_msm8930(void)
+{
+#ifdef CONFIG_ARCH_MSM8930
+ return read_msm_cpu_type() == MSM_CPU_8930;
+#else
+ return 0;
+#endif
+}
+
+static inline int cpu_is_msm8930aa(void)
+{
+#ifdef CONFIG_ARCH_MSM8930
+ return read_msm_cpu_type() == MSM_CPU_8930AA;
+#else
+ return 0;
+#endif
+}
+
+static inline int cpu_is_msm8930ab(void)
+{
+#ifdef CONFIG_ARCH_MSM8930
+ return read_msm_cpu_type() == MSM_CPU_8930AB;
+#else
+ return 0;
+#endif
+}
+
/*
 * Nonzero iff the SoC reads back as MSM8627. 8627 shares 8930's
 * CONFIG_ARCH type unless a separate one is ever needed, so the
 * guard below is CONFIG_ARCH_MSM8930.
 */
static inline int cpu_is_msm8627(void)
{
#ifndef CONFIG_ARCH_MSM8930
	return 0;
#else
	return read_msm_cpu_type() == MSM_CPU_8627;
#endif
}
+
/*
 * cpu_is_fsm9xxx() - nonzero iff socinfo identifies the SoC as FSM9xxx.
 * Constant 0 when CONFIG_ARCH_FSM9XXX is not selected; BUGs if socinfo
 * has not yet identified the CPU.
 */
static inline int cpu_is_fsm9xxx(void)
{
#ifndef CONFIG_ARCH_FSM9XXX
	return 0;
#else
	const enum msm_cpu cur = socinfo_get_msm_cpu();

	BUG_ON(cur == MSM_CPU_UNKNOWN);
	return cur == FSM_CPU_9XXX;
#endif
}
+
/*
 * cpu_is_msm9615() - nonzero iff socinfo identifies the SoC as MSM9615.
 * Constant 0 when CONFIG_ARCH_MSM9615 is not selected; BUGs if socinfo
 * has not yet identified the CPU.
 */
static inline int cpu_is_msm9615(void)
{
#ifndef CONFIG_ARCH_MSM9615
	return 0;
#else
	const enum msm_cpu cur = socinfo_get_msm_cpu();

	BUG_ON(cur == MSM_CPU_UNKNOWN);
	return cur == MSM_CPU_9615;
#endif
}
+
/*
 * cpu_is_msm8625() - nonzero iff socinfo identifies the SoC as MSM8625.
 * Constant 0 when CONFIG_ARCH_MSM8625 is not selected; BUGs if socinfo
 * has not yet identified the CPU.
 */
static inline int cpu_is_msm8625(void)
{
#ifndef CONFIG_ARCH_MSM8625
	return 0;
#else
	const enum msm_cpu cur = socinfo_get_msm_cpu();

	BUG_ON(cur == MSM_CPU_UNKNOWN);
	return cur == MSM_CPU_8625;
#endif
}
+
/*
 * cpu_is_msm8974() - nonzero iff socinfo identifies the SoC as MSM8974.
 * Constant 0 when CONFIG_ARCH_MSM8974 is not selected; BUGs if socinfo
 * has not yet identified the CPU.
 */
static inline int cpu_is_msm8974(void)
{
#ifndef CONFIG_ARCH_MSM8974
	return 0;
#else
	const enum msm_cpu cur = socinfo_get_msm_cpu();

	BUG_ON(cur == MSM_CPU_UNKNOWN);
	return cur == MSM_CPU_8974;
#endif
}
+
/*
 * cpu_is_msm8974pro_aa() - nonzero iff socinfo reports MSM8974Pro-AA.
 * Constant 0 when CONFIG_ARCH_MSM8974 is not selected; BUGs if socinfo
 * has not yet identified the CPU.
 */
static inline int cpu_is_msm8974pro_aa(void)
{
#ifndef CONFIG_ARCH_MSM8974
	return 0;
#else
	const enum msm_cpu cur = socinfo_get_msm_cpu();

	BUG_ON(cur == MSM_CPU_UNKNOWN);
	return cur == MSM_CPU_8974PRO_AA;
#endif
}
+
/*
 * cpu_is_msm8974pro_ab() - nonzero iff socinfo reports MSM8974Pro-AB.
 * Constant 0 when CONFIG_ARCH_MSM8974 is not selected; BUGs if socinfo
 * has not yet identified the CPU.
 */
static inline int cpu_is_msm8974pro_ab(void)
{
#ifndef CONFIG_ARCH_MSM8974
	return 0;
#else
	const enum msm_cpu cur = socinfo_get_msm_cpu();

	BUG_ON(cur == MSM_CPU_UNKNOWN);
	return cur == MSM_CPU_8974PRO_AB;
#endif
}
+
/*
 * cpu_is_msm8974pro_ac() - nonzero iff socinfo reports MSM8974Pro-AC.
 * Constant 0 when CONFIG_ARCH_MSM8974 is not selected; BUGs if socinfo
 * has not yet identified the CPU.
 */
static inline int cpu_is_msm8974pro_ac(void)
{
#ifndef CONFIG_ARCH_MSM8974
	return 0;
#else
	const enum msm_cpu cur = socinfo_get_msm_cpu();

	BUG_ON(cur == MSM_CPU_UNKNOWN);
	return cur == MSM_CPU_8974PRO_AC;
#endif
}
+
/*
 * cpu_is_msmferrum() - nonzero iff socinfo identifies the SoC as Ferrum.
 * Constant 0 when CONFIG_ARCH_MSMFERRUM is not selected; BUGs if socinfo
 * has not yet identified the CPU.
 */
static inline int cpu_is_msmferrum(void)
{
#ifndef CONFIG_ARCH_MSMFERRUM
	return 0;
#else
	const enum msm_cpu cur = socinfo_get_msm_cpu();

	BUG_ON(cur == MSM_CPU_UNKNOWN);
	return cur == MSM_CPU_FERRUM;
#endif
}
+
/*
 * cpu_is_msm8916() - nonzero iff socinfo identifies the SoC as MSM8916.
 * Constant 0 when CONFIG_ARCH_MSM8916 is not selected; BUGs if socinfo
 * has not yet identified the CPU.
 * (Dropped the stray blank line before the closing brace so the helper
 * matches every sibling in this family.)
 */
static inline int cpu_is_msm8916(void)
{
#ifdef CONFIG_ARCH_MSM8916
	enum msm_cpu cpu = socinfo_get_msm_cpu();

	BUG_ON(cpu == MSM_CPU_UNKNOWN);
	return cpu == MSM_CPU_8916;
#else
	return 0;
#endif
}
+
/*
 * cpu_is_msm8936() - nonzero iff socinfo identifies the SoC as MSM8936.
 * Guarded by CONFIG_ARCH_MSM8916 (8936 shares the 8916 arch config);
 * constant 0 otherwise. BUGs if socinfo has not yet identified the CPU.
 * (Dropped the stray blank line before the closing brace so the helper
 * matches every sibling in this family.)
 */
static inline int cpu_is_msm8936(void)
{
#ifdef CONFIG_ARCH_MSM8916
	enum msm_cpu cpu = socinfo_get_msm_cpu();

	BUG_ON(cpu == MSM_CPU_UNKNOWN);
	return cpu == MSM_CPU_8936;
#else
	return 0;
#endif
}
+
/*
 * cpu_is_msm8939() - nonzero iff socinfo identifies the SoC as MSM8939.
 * Guarded by CONFIG_ARCH_MSM8916 (8939 shares the 8916 arch config);
 * constant 0 otherwise. BUGs if socinfo has not yet identified the CPU.
 * (Dropped the stray blank line before the closing brace so the helper
 * matches every sibling in this family.)
 */
static inline int cpu_is_msm8939(void)
{
#ifdef CONFIG_ARCH_MSM8916
	enum msm_cpu cpu = socinfo_get_msm_cpu();

	BUG_ON(cpu == MSM_CPU_UNKNOWN);
	return cpu == MSM_CPU_8939;
#else
	return 0;
#endif
}
+
/*
 * cpu_is_msm8226() - nonzero iff socinfo identifies the SoC as MSM8226.
 * Constant 0 when CONFIG_ARCH_MSM8226 is not selected; BUGs if socinfo
 * has not yet identified the CPU.
 */
static inline int cpu_is_msm8226(void)
{
#ifndef CONFIG_ARCH_MSM8226
	return 0;
#else
	const enum msm_cpu cur = socinfo_get_msm_cpu();

	BUG_ON(cur == MSM_CPU_UNKNOWN);
	return cur == MSM_CPU_8226;
#endif
}
+
/*
 * cpu_is_msm8610() - nonzero iff socinfo identifies the SoC as MSM8610.
 * Constant 0 when CONFIG_ARCH_MSM8610 is not selected; BUGs if socinfo
 * has not yet identified the CPU.
 */
static inline int cpu_is_msm8610(void)
{
#ifndef CONFIG_ARCH_MSM8610
	return 0;
#else
	const enum msm_cpu cur = socinfo_get_msm_cpu();

	BUG_ON(cur == MSM_CPU_UNKNOWN);
	return cur == MSM_CPU_8610;
#endif
}
+
/*
 * cpu_is_msm8625q() - nonzero iff socinfo identifies the SoC as MSM8625Q.
 * Shares CONFIG_ARCH_MSM8625 with the plain 8625 helper above; constant 0
 * when that config is not selected. BUGs if socinfo has not yet
 * identified the CPU.
 */
static inline int cpu_is_msm8625q(void)
{
#ifndef CONFIG_ARCH_MSM8625
	return 0;
#else
	const enum msm_cpu cur = socinfo_get_msm_cpu();

	BUG_ON(cur == MSM_CPU_UNKNOWN);
	return cur == MSM_CPU_8625Q;
#endif
}
+
/* True for any member of the MSM8960 SoC family (8960 or 8960AB). */
static inline int soc_class_is_msm8960(void)
{
	if (cpu_is_msm8960())
		return 1;
	return cpu_is_msm8960ab();
}
+
/* True for any member of the APQ8064 SoC family (8064, 8064AB, 8064AA). */
static inline int soc_class_is_apq8064(void)
{
	if (cpu_is_apq8064())
		return 1;
	if (cpu_is_apq8064ab())
		return 1;
	return cpu_is_apq8064aa();
}
+
/* True for any member of the MSM8930 SoC family (8930, 8930AA, 8930AB, 8627). */
static inline int soc_class_is_msm8930(void)
{
	if (cpu_is_msm8930())
		return 1;
	if (cpu_is_msm8930aa())
		return 1;
	if (cpu_is_msm8930ab())
		return 1;
	return cpu_is_msm8627();
}
+
/* True for any member of the MSM8974 SoC family (8974 or any 8974Pro bin). */
static inline int soc_class_is_msm8974(void)
{
	if (cpu_is_msm8974())
		return 1;
	if (cpu_is_msm8974pro_aa())
		return 1;
	if (cpu_is_msm8974pro_ab())
		return 1;
	return cpu_is_msm8974pro_ac();
}
+
+#endif