author    Russell King <rmk+kernel@arm.linux.org.uk>    2015-06-06 00:13:40 +0100
committer Russell King <rmk+kernel@arm.linux.org.uk>    2015-07-25 15:28:14 +0100
commit    f746929ffdc8a83c0e6092343d4475f6485e13d3 (patch)
tree      3d8a782cdc5232e37dd21a7a3565d56a95b62080 /arch/arm/mach-omap2
parent    4e1f8a6f1d978f033f1751e2887b3a69fab3f878 (diff)
Revert "ARM: OMAP4: remove dead kconfig option OMAP4_ERRATA_I688"
This reverts commit 606da4826b89b044b51e3a84958b802204cfe4c7. We actually need this code for proper behaviour of OMAP4, and it needs fixing in a different way rather than just removing the code. Disabling code which is necessary in the hopes of pursuing multiplatform kernels is a stupid approach.

Acked-by: Tony Lindgren <tony@atomide.com>
Acked-by: Richard Woodruff <r-woodruff2@ti.com>
Signed-off-by: Russell King <rmk+kernel@arm.linux.org.uk>
Diffstat (limited to 'arch/arm/mach-omap2')
-rw-r--r--  arch/arm/mach-omap2/Kconfig        | 21
-rw-r--r--  arch/arm/mach-omap2/common.c       |  1
-rw-r--r--  arch/arm/mach-omap2/common.h       |  3
-rw-r--r--  arch/arm/mach-omap2/io.c           |  2
-rw-r--r--  arch/arm/mach-omap2/omap-secure.h  |  7
-rw-r--r--  arch/arm/mach-omap2/omap4-common.c | 69
-rw-r--r--  arch/arm/mach-omap2/sleep44xx.S    |  2
7 files changed, 105 insertions, 0 deletions
diff --git a/arch/arm/mach-omap2/Kconfig b/arch/arm/mach-omap2/Kconfig
index ecc04ff13e95..2128441430ad 100644
--- a/arch/arm/mach-omap2/Kconfig
+++ b/arch/arm/mach-omap2/Kconfig
@@ -240,6 +240,27 @@ config OMAP3_SDRC_AC_TIMING
wish to say no. Selecting yes without understanding what is
going on could result in system crashes;
+config OMAP4_ERRATA_I688
+ bool "OMAP4 errata: Async Bridge Corruption"
+ depends on (ARCH_OMAP4 || SOC_OMAP5) && !ARCH_MULTIPLATFORM
+ select ARCH_HAS_BARRIERS
+ help
+	  If data is stalled inside the asynchronous bridge because of back
+	  pressure, it may be accepted multiple times, creating a pointer
+	  misalignment that will corrupt subsequent transfers on that data
+	  path until the next reset of the system (there is no recovery
+	  procedure once the issue is hit; the path remains consistently
+	  broken). Async bridges sit on the paths from the MPU to the EMIF
+	  and from the MPU to the L3 interconnect. This situation can only
+	  happen when idle is initiated by a Master Request Disconnection
+	  (which is triggered by software when executing WFI on the CPU).
+	  The workaround for this erratum requires that every initiator
+	  connected through an async bridge drains its data path before
+	  issuing WFI. This condition is met if one strongly-ordered access
+	  is performed to the target right before executing WFI. In the MPU
+	  case, the L3 T2ASYNC FIFO and the DDR T2ASYNC FIFO need to be
+	  drained. The IO barrier ensures that there is no synchronisation
+	  loss when an initiator operates on both interconnect ports
+	  simultaneously.
endmenu
endif
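
Note on the ARCH_HAS_BARRIERS select above: when that option is set, the
generic ARM barrier code expects the platform to supply a mach/barriers.h
that defines the kernel's mb()/wmb() macros itself, which is how the
strongly-ordered accesses get issued on every write barrier. That header is
not part of this diff, so the following is only a rough sketch of the
expected wiring through omap_bus_sync(), under that assumption:

	/*
	 * Sketch of a mach/barriers.h as consumed when ARCH_HAS_BARRIERS is
	 * selected.  NOT part of this diff; the exact contents are an
	 * assumption based on the ARCH_HAS_BARRIERS convention.
	 */
	#include <asm/outercache.h>

	extern void omap_bus_sync(void);

	#define rmb()	dsb()
	#define wmb()	do { dsb(); outer_sync(); omap_bus_sync(); } while (0)
	#define mb()	wmb()

With this in place, any mb()/wmb() executed by the kernel performs the
strongly-ordered DRAM and SRAM accesses described in the help text above,
draining the async bridges before the barrier is considered complete.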
diff --git a/arch/arm/mach-omap2/common.c b/arch/arm/mach-omap2/common.c
index eae6a0e87c90..484cdadfb187 100644
--- a/arch/arm/mach-omap2/common.c
+++ b/arch/arm/mach-omap2/common.c
@@ -30,4 +30,5 @@ int __weak omap_secure_ram_reserve_memblock(void)
void __init omap_reserve(void)
{
omap_secure_ram_reserve_memblock();
+ omap_barrier_reserve_memblock();
}
diff --git a/arch/arm/mach-omap2/common.h b/arch/arm/mach-omap2/common.h
index cf3cf22ecd42..46e24581d624 100644
--- a/arch/arm/mach-omap2/common.h
+++ b/arch/arm/mach-omap2/common.h
@@ -200,6 +200,9 @@ void __init omap4_map_io(void);
void __init omap5_map_io(void);
void __init ti81xx_map_io(void);
+/* omap_barriers_init() is OMAP4 only */
+void omap_barriers_init(void);
+
/**
* omap_test_timeout - busy-loop, testing a condition
* @cond: condition to test until it evaluates to true
diff --git a/arch/arm/mach-omap2/io.c b/arch/arm/mach-omap2/io.c
index 820dde8b5b04..7743e3672f98 100644
--- a/arch/arm/mach-omap2/io.c
+++ b/arch/arm/mach-omap2/io.c
@@ -306,6 +306,7 @@ void __init am33xx_map_io(void)
void __init omap4_map_io(void)
{
iotable_init(omap44xx_io_desc, ARRAY_SIZE(omap44xx_io_desc));
+ omap_barriers_init();
}
#endif
@@ -313,6 +314,7 @@ void __init omap4_map_io(void)
void __init omap5_map_io(void)
{
iotable_init(omap54xx_io_desc, ARRAY_SIZE(omap54xx_io_desc));
+ omap_barriers_init();
}
#endif
/*
diff --git a/arch/arm/mach-omap2/omap-secure.h b/arch/arm/mach-omap2/omap-secure.h
index af2851fbcdf0..dec2b05d184b 100644
--- a/arch/arm/mach-omap2/omap-secure.h
+++ b/arch/arm/mach-omap2/omap-secure.h
@@ -70,6 +70,13 @@ extern u32 rx51_secure_dispatcher(u32 idx, u32 process, u32 flag, u32 nargs,
extern u32 rx51_secure_update_aux_cr(u32 set_bits, u32 clear_bits);
extern u32 rx51_secure_rng_call(u32 ptr, u32 count, u32 flag);
+#ifdef CONFIG_OMAP4_ERRATA_I688
+extern int omap_barrier_reserve_memblock(void);
+#else
+static inline void omap_barrier_reserve_memblock(void)
+{ }
+#endif
+
#ifdef CONFIG_SOC_HAS_REALTIME_COUNTER
void set_cntfreq(void);
#else
diff --git a/arch/arm/mach-omap2/omap4-common.c b/arch/arm/mach-omap2/omap4-common.c
index 16350eefa66c..7bb116a6f86f 100644
--- a/arch/arm/mach-omap2/omap4-common.c
+++ b/arch/arm/mach-omap2/omap4-common.c
@@ -51,6 +51,75 @@ static void __iomem *twd_base;
#define IRQ_LOCALTIMER 29
+#ifdef CONFIG_OMAP4_ERRATA_I688
+/* Used to implement memory barrier on DRAM path */
+#define OMAP4_DRAM_BARRIER_VA 0xfe600000
+
+void __iomem *dram_sync, *sram_sync;
+
+static phys_addr_t paddr;
+static u32 size;
+
+void omap_bus_sync(void)
+{
+ if (dram_sync && sram_sync) {
+ writel_relaxed(readl_relaxed(dram_sync), dram_sync);
+ writel_relaxed(readl_relaxed(sram_sync), sram_sync);
+ isb();
+ }
+}
+EXPORT_SYMBOL(omap_bus_sync);
+
+static int __init omap4_sram_init(void)
+{
+ struct device_node *np;
+ struct gen_pool *sram_pool;
+
+ np = of_find_compatible_node(NULL, NULL, "ti,omap4-mpu");
+ if (!np)
+ pr_warn("%s:Unable to allocate sram needed to handle errata I688\n",
+ __func__);
+ sram_pool = of_get_named_gen_pool(np, "sram", 0);
+ if (!sram_pool)
+ pr_warn("%s:Unable to get sram pool needed to handle errata I688\n",
+ __func__);
+ else
+ sram_sync = (void *)gen_pool_alloc(sram_pool, PAGE_SIZE);
+
+ return 0;
+}
+omap_arch_initcall(omap4_sram_init);
+
+/* Steal one page physical memory for barrier implementation */
+int __init omap_barrier_reserve_memblock(void)
+{
+
+ size = ALIGN(PAGE_SIZE, SZ_1M);
+ paddr = arm_memblock_steal(size, SZ_1M);
+
+ return 0;
+}
+
+void __init omap_barriers_init(void)
+{
+ struct map_desc dram_io_desc[1];
+
+ dram_io_desc[0].virtual = OMAP4_DRAM_BARRIER_VA;
+ dram_io_desc[0].pfn = __phys_to_pfn(paddr);
+ dram_io_desc[0].length = size;
+ dram_io_desc[0].type = MT_MEMORY_RW_SO;
+ iotable_init(dram_io_desc, ARRAY_SIZE(dram_io_desc));
+ dram_sync = (void __iomem *) dram_io_desc[0].virtual;
+
+ pr_info("OMAP4: Map 0x%08llx to 0x%08lx for dram barrier\n",
+ (long long) paddr, dram_io_desc[0].virtual);
+
+}
+#else
+void __init omap_barriers_init(void)
+{}
+#endif
+
void gic_dist_disable(void)
{
if (gic_dist_base_addr)
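
To see how the pieces above fit together: omap_bus_sync() is meant to run
immediately before the CPU executes WFI, so that one strongly-ordered access
on each path (the DRAM barrier page reserved by omap_barrier_reserve_memblock()
and the SRAM page from omap4_sram_init()) drains the async bridges first. The
in-tree caller is the omap_do_wfi assembly routine touched below; the C
function here is purely a hypothetical illustration of that ordering:

	/*
	 * Hypothetical illustration only -- the real caller is omap_do_wfi in
	 * arch/arm/mach-omap2/sleep44xx.S.  It shows the ordering the erratum
	 * workaround relies on: drain both async bridge paths, then WFI.
	 */
	static void omap4_idle_before_wfi(void)
	{
		/* SO accesses drain the MPU->EMIF and MPU->L3 async bridges */
		omap_bus_sync();

		/* Only now is it safe to let the master request disconnect */
		asm volatile("wfi" : : : "memory");
	}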
diff --git a/arch/arm/mach-omap2/sleep44xx.S b/arch/arm/mach-omap2/sleep44xx.S
index ad1bb9431e94..b84a0122d823 100644
--- a/arch/arm/mach-omap2/sleep44xx.S
+++ b/arch/arm/mach-omap2/sleep44xx.S
@@ -333,9 +333,11 @@ ENDPROC(omap4_cpu_resume)
#endif /* defined(CONFIG_SMP) && defined(CONFIG_PM) */
+#ifndef CONFIG_OMAP4_ERRATA_I688
ENTRY(omap_bus_sync)
ret lr
ENDPROC(omap_bus_sync)
+#endif
ENTRY(omap_do_wfi)
stmfd sp!, {lr}