imx6q: add cpu suspend/resume support

Add the standby and mem suspend states for i.MX6Q.  The low level
suspend routine is copied into internal RAM (IRAM) and executed from
there, so that the DDR can be put into self-refresh and its IO pads
set to low power mode while the ARM core is powered off.

Signed-off-by: Jason Chen <jason.chen@linaro.org>
diff --git a/arch/arm/mach-imx/Makefile b/arch/arm/mach-imx/Makefile
index aba7321..8c33917 100644
--- a/arch/arm/mach-imx/Makefile
+++ b/arch/arm/mach-imx/Makefile
@@ -70,4 +70,4 @@
 obj-$(CONFIG_SMP) += platsmp.o
 obj-$(CONFIG_HOTPLUG_CPU) += hotplug.o
 obj-$(CONFIG_LOCAL_TIMERS) += localtimer.o
-obj-$(CONFIG_SOC_IMX6Q) += clock-imx6q.o mach-imx6q.o pm-imx6q.o
+obj-$(CONFIG_SOC_IMX6Q) += clock-imx6q.o mach-imx6q.o pm-imx6q.o suspend-imx6q.o
diff --git a/arch/arm/mach-imx/clock-imx6q.c b/arch/arm/mach-imx/clock-imx6q.c
index 0ac0094..3ee5edb 100644
--- a/arch/arm/mach-imx/clock-imx6q.c
+++ b/arch/arm/mach-imx/clock-imx6q.c
@@ -18,6 +18,7 @@
 #include <linux/of.h>
 #include <linux/of_address.h>
 #include <linux/of_irq.h>
+#include <linux/suspend.h>
 #include <asm/div64.h>
 #include <asm/mach/map.h>
 #include <mach/clock.h>
@@ -1945,6 +1946,45 @@
 	_REGISTER_CLOCK(NULL, "gpt_clk", gpt_clk),
 };
 
+static u32 ccr, clpcr, ccgr1, ccgr6;
+#define BP_CCR_RBC_EN	27
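+
+/*
+ * CCM state saved across suspend: CCR, CLPCR and the GPU clock gates
+ * in CCGR1/CCGR6.  For mem suspend the REG_BYPASS_COUNTER (RBC) is
+ * additionally enabled in CCR before entering the low power state.
+ */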
+void imx6q_ccm_pre_suspend(suspend_state_t state)
+{
+	ccr = readl_relaxed(CCR);
+	if (state == PM_SUSPEND_MEM)
+		writel_relaxed(ccr | (1 << BP_CCR_RBC_EN), CCR);
+	clpcr = readl_relaxed(CLPCR);
+
+	/* for GPU */
+	ccgr1 = readl_relaxed(CCGR1);
+	ccgr6 = readl_relaxed(CCGR6);
+}
+
+void imx6q_ccm_gpu_pre_suspend(void)
+{
+	/* disable clocks */
+	writel_relaxed(ccgr1 & 0xf0ffffff, CCGR1);
+	writel_relaxed(ccgr6 & 0x00003fff, CCGR6);
+}
+
+void imx6q_ccm_gpu_post_resume(void)
+{
+	/* enable clocks */
+	writel_relaxed(ccgr1 | 0x0f000000, CCGR1);
+	writel_relaxed(ccgr6 | 0x0000c000, CCGR6);
+}
+
+void imx6q_ccm_post_resume(void)
+{
+	writel_relaxed(ccr, CCR);
+	writel_relaxed(clpcr, CLPCR);
+
+	/* for GPU */
+	writel_relaxed(ccgr1, CCGR1);
+	writel_relaxed(ccgr6, CCGR6);
+}
+
 int imx6q_set_lpm(enum mxc_cpu_pwr_mode mode)
 {
 	u32 val = readl_relaxed(CLPCR);
@@ -1972,9 +2012,18 @@
 		val |= BM_CLPCR_SBYOS;
 		val |= BM_CLPCR_BYP_MMDC_CH1_LPM_HS;
 		break;
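+	/*
+	 * Like STOP_POWER_OFF, but additionally enables the peripheral
+	 * well bias (WB_PER_AT_LPM); used when the ARM core itself is
+	 * power gated (STOP + SRPG + ARM power off).
+	 */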
+	case ARM_POWER_OFF:
+		val |= 0x2 << BP_CLPCR_LPM;
+		val |= 0x3 << BP_CLPCR_STBY_COUNT;
+		val |= BM_CLPCR_VSTBY;
+		val |= BM_CLPCR_SBYOS;
+		val |= BM_CLPCR_BYP_MMDC_CH1_LPM_HS;
+		val |= BM_CLPCR_WB_PER_AT_LPM;
+		break;
 	default:
 		return -EINVAL;
 	}
+
 	writel_relaxed(val, CLPCR);
 
 	return 0;
@@ -2016,7 +2065,7 @@
 	}
 
 	/* only keep necessary clocks on */
-	writel_relaxed(0x3 << CG0  | 0x3 << CG1  | 0x3 << CG2,	CCGR0);
+	writel_relaxed(0x3 << CG0  | 0x3 << CG1  | 0x3 << CG2,  CCGR0);
 	writel_relaxed(0x3 << CG8  | 0x3 << CG9  | 0x3 << CG10,	CCGR2);
 	writel_relaxed(0x3 << CG10 | 0x3 << CG12 | 0x1 << CG14,	CCGR3);
 	writel_relaxed(0x3 << CG4  | 0x3 << CG6  | 0x3 << CG7,	CCGR4);
diff --git a/arch/arm/mach-imx/gpc.c b/arch/arm/mach-imx/gpc.c
index e1537f9..3462bb4 100644
--- a/arch/arm/mach-imx/gpc.c
+++ b/arch/arm/mach-imx/gpc.c
@@ -15,41 +15,95 @@
 #include <linux/of.h>
 #include <linux/of_address.h>
 #include <linux/of_irq.h>
+#include <linux/suspend.h>
 #include <asm/hardware/gic.h>
 
+#define GPC_CNTR		0x000
 #define GPC_IMR1		0x008
+#define GPC_ISR1		0x018
+#define GPC_ISR2		0x01c
+#define GPC_ISR3		0x020
+#define GPC_ISR4		0x024
+#define GPC_PGC_GPU_PGCR	0x260
 #define GPC_PGC_CPU_PDN		0x2a0
+#define GPC_PGC_CPU_PUPSCR	0x2a4
+#define GPC_PGC_CPU_PDNSCR	0x2a8
 
 #define IMR_NUM			4
+#define ISR_NUM			4
 
 static void __iomem *gpc_base;
 static u32 gpc_wake_irqs[IMR_NUM];
 static u32 gpc_saved_imrs[IMR_NUM];
+static u32 gpc_saved_cntr;
+static u32 gpc_saved_cpu_pdn;
+static u32 gpc_saved_cpu_pupscr;
+static u32 gpc_saved_cpu_pdnscr;
 
-void imx_gpc_pre_suspend(void)
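+/*
+ * Return true if any interrupt configured as a wakeup source is
+ * already pending in the GPC interrupt status registers, so the
+ * caller can abort the suspend before powering anything down.
+ */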
+bool imx_gpc_wake_irq_pending(void)
+{
+	void __iomem *reg_isr1 = gpc_base + GPC_ISR1;
+	int i;
+	u32 val;
+
+	for (i = 0; i < ISR_NUM; i++) {
+		val = readl_relaxed(reg_isr1 + i * 4);
+		if (val & gpc_wake_irqs[i])
+			return true;
+	}
+
+	return false;
+}
+
+void imx_gpc_pre_suspend(suspend_state_t state)
 {
 	void __iomem *reg_imr1 = gpc_base + GPC_IMR1;
 	int i;
 
-	/* Tell GPC to power off ARM core when suspend */
-	writel_relaxed(0x1, gpc_base + GPC_PGC_CPU_PDN);
+	gpc_saved_cntr = readl_relaxed(gpc_base + GPC_CNTR);
+	gpc_saved_cpu_pdn = readl_relaxed(gpc_base + GPC_PGC_CPU_PDN);
+	gpc_saved_cpu_pupscr = readl_relaxed(gpc_base + GPC_PGC_CPU_PUPSCR);
+	gpc_saved_cpu_pdnscr = readl_relaxed(gpc_base + GPC_PGC_CPU_PDNSCR);
 
 	for (i = 0; i < IMR_NUM; i++) {
 		gpc_saved_imrs[i] = readl_relaxed(reg_imr1 + i * 4);
 		writel_relaxed(~gpc_wake_irqs[i], reg_imr1 + i * 4);
 	}
+
+	/* Use the maximum CPU power up/down timing (PUPSCR/PDNSCR) */
+	writel_relaxed(0xFFFFFFFF, gpc_base + GPC_PGC_CPU_PUPSCR);
+	writel_relaxed(0xFFFFFFFF, gpc_base + GPC_PGC_CPU_PDNSCR);
+
+	if (state == PM_SUSPEND_MEM) {
+		/* Tell GPC to power off ARM core when suspend */
+		writel_relaxed(0x1, gpc_base + GPC_PGC_CPU_PDN);
+
+		/* GPU PGCR */
+		writel_relaxed(0x1, gpc_base + GPC_PGC_GPU_PGCR);
+		/* GPU/VPU power down request */
+		writel_relaxed(0x1, gpc_base + GPC_CNTR);
+	}
 }
 
-void imx_gpc_post_resume(void)
+void imx_gpc_post_resume(suspend_state_t state)
 {
 	void __iomem *reg_imr1 = gpc_base + GPC_IMR1;
 	int i;
 
-	/* Keep ARM core powered on for other low-power modes */
-	writel_relaxed(0x0, gpc_base + GPC_PGC_CPU_PDN);
+	writel_relaxed(gpc_saved_cpu_pdnscr, gpc_base + GPC_PGC_CPU_PDNSCR);
+	writel_relaxed(gpc_saved_cpu_pupscr, gpc_base + GPC_PGC_CPU_PUPSCR);
+	writel_relaxed(gpc_saved_cpu_pdn, gpc_base + GPC_PGC_CPU_PDN);
+	writel_relaxed(gpc_saved_cntr, gpc_base + GPC_CNTR);
 
 	for (i = 0; i < IMR_NUM; i++)
 		writel_relaxed(gpc_saved_imrs[i], reg_imr1 + i * 4);
+
+	if (state == PM_SUSPEND_MEM) {
+		/* GPU PGCR */
+		writel_relaxed(0x1, gpc_base + GPC_PGC_GPU_PGCR);
+		/* GPU/VPU power up request */
+		writel_relaxed(gpc_saved_cntr | 0x2, gpc_base + GPC_CNTR);
+	}
 }
 
 static int imx_gpc_irq_set_wake(struct irq_data *d, unsigned int on)
diff --git a/arch/arm/mach-imx/mach-imx6q.c b/arch/arm/mach-imx/mach-imx6q.c
index 2cc33f3..940a91e 100644
--- a/arch/arm/mach-imx/mach-imx6q.c
+++ b/arch/arm/mach-imx/mach-imx6q.c
@@ -119,6 +119,7 @@
 #include <asm/sizes.h>
 #include <asm/mach/map.h>
 #include <mach/hardware.h>
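+
+/* IOMUXC base address, referenced by the low power suspend code */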
+void __iomem *iomux_base;
 static struct map_desc imx_mx6q_fix_desc[] = {
 	{
 		.virtual	= MX6Q_IO_P2V(MX6Q_IOMUXC_BASE_ADDR),
@@ -131,7 +132,8 @@
 void __init mx6q_fix_map_io(void)
 {
 	iotable_init(imx_mx6q_fix_desc, ARRAY_SIZE(imx_mx6q_fix_desc));
-	mxc_iomux_v3_init(MX6Q_IO_P2V(MX6Q_IOMUXC_BASE_ADDR));
+	iomux_base = (void __iomem *)MX6Q_IO_P2V(MX6Q_IOMUXC_BASE_ADDR);
+	mxc_iomux_v3_init(iomux_base);
 }
 
 static void __init imx6q_map_io(void)
diff --git a/arch/arm/mach-imx/mmdc.c b/arch/arm/mach-imx/mmdc.c
index c461e98..28575b7 100644
--- a/arch/arm/mach-imx/mmdc.c
+++ b/arch/arm/mach-imx/mmdc.c
@@ -21,6 +21,9 @@
 #define BP_MMDC_MAPSR_PSD	0
 #define BP_MMDC_MAPSR_PSS	4
 
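+/*
+ * Remember the base address of each MMDC instance; the low level
+ * suspend code in suspend-imx6q.S uses it to put the DDR into
+ * self-refresh.
+ */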
+static int idx;
+void __iomem *mmdc_base_array[2];
+
 static int __devinit imx_mmdc_probe(struct platform_device *pdev)
 {
 	struct device_node *np = pdev->dev.of_node;
@@ -31,6 +34,8 @@
 	mmdc_base = of_iomap(np, 0);
 	WARN_ON(!mmdc_base);
 
+	mmdc_base_array[idx++] = mmdc_base;
+
 	reg = mmdc_base + MMDC_MAPSR;
 
 	/* Enable automatic power saving */
diff --git a/arch/arm/mach-imx/pm-imx6q.c b/arch/arm/mach-imx/pm-imx6q.c
index f20f191..3819789 100644
--- a/arch/arm/mach-imx/pm-imx6q.c
+++ b/arch/arm/mach-imx/pm-imx6q.c
@@ -14,32 +14,111 @@
 #include <linux/io.h>
 #include <linux/of.h>
 #include <linux/suspend.h>
+#include <linux/iram_alloc.h>
+#include <linux/delay.h>
 #include <asm/cacheflush.h>
 #include <asm/proc-fns.h>
 #include <asm/suspend.h>
 #include <asm/hardware/cache-l2x0.h>
+#include <asm/tlb.h>
+#include <asm/mach/map.h>
 #include <mach/common.h>
 #include <mach/hardware.h>
 
+#define ANATOP_REG_2P5		0x130
+#define ANATOP_REG_CORE		0x140
+
 extern unsigned long phys_l2x0_saved_regs;
 
-static int imx6q_suspend_finish(unsigned long val)
+static void __iomem *anatop_base;
+static u32 anatop[2];
+
+static void *suspend_iram_base;
+static unsigned long iram_paddr, cpaddr;
+static void (*suspend_in_iram)(suspend_state_t state,
+	unsigned long iram_paddr, unsigned long suspend_iram_base);
+
+extern void mx6q_suspend(suspend_state_t state);
+
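+/*
+ * ANATOP regulator setup around suspend: enable the weak 2P5 linear
+ * regulator, set the SOC rail to the same voltage as the ARM rail,
+ * and switch off the PU (GPU) regulator while suspended.
+ */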
+static void imx6q_anatop_pre_suspend(void)
 {
-	cpu_do_idle();
-	return 0;
+	u32 reg;
+
+	/* save registers */
+	anatop[0] = readl_relaxed(anatop_base + ANATOP_REG_2P5);
+	anatop[1] = readl_relaxed(anatop_base + ANATOP_REG_CORE);
+
+	/* Enable weak 2P5 linear regulator */
+	reg = readl_relaxed(anatop_base + ANATOP_REG_2P5);
+	reg |= 1 << 18;
+	writel_relaxed(reg, anatop_base + ANATOP_REG_2P5);
+	/* Make sure the ARM and SOC domains run at the same voltage */
+	reg = readl_relaxed(anatop_base + ANATOP_REG_CORE);
+	reg &= ~(0x1f << 18);
+	reg |= (reg & 0x1f) << 18;
+	writel_relaxed(reg, anatop_base + ANATOP_REG_CORE);
+
+	/* GPU: power off the PU regulator (REG1) */
+	reg = readl_relaxed(anatop_base + ANATOP_REG_CORE);
+	reg &= ~0x0003fe00;
+	writel_relaxed(reg, anatop_base + ANATOP_REG_CORE);
+}
+
+static void imx6q_anatop_post_resume(void)
+{
+	u32 reg;
+
+	/* restore registers */
+	writel_relaxed(anatop[0], anatop_base + ANATOP_REG_2P5);
+	writel_relaxed(anatop[1], anatop_base + ANATOP_REG_CORE);
+
+	/* GPU: power the PU regulator (REG1) back on */
+	reg = readl_relaxed(anatop_base + ANATOP_REG_CORE);
+	reg &= ~0x0003fe00;
+	reg |= 0x10 << 9; /* 1.1v */
+	writel_relaxed(reg, anatop_base + ANATOP_REG_CORE);
+	mdelay(10);
 }
 
 static int imx6q_pm_enter(suspend_state_t state)
 {
 	switch (state) {
 	case PM_SUSPEND_MEM:
-		imx6q_set_lpm(STOP_POWER_OFF);
-		imx_gpc_pre_suspend();
-		imx_set_cpu_jump(0, v7_cpu_resume);
-		/* Zzz ... */
-		cpu_suspend(0, imx6q_suspend_finish);
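+		/*
+		 * Bail out if a wakeup interrupt is already pending,
+		 * then prepare CCM/GPC/ANATOP for ARM power off and run
+		 * the suspend routine from IRAM, which puts the DDR
+		 * into self-refresh before the SoC enters stop mode.
+		 */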
+		if (imx_gpc_wake_irq_pending())
+			return 0;
+
+		imx6q_ccm_pre_suspend(state);
+		imx6q_set_lpm(ARM_POWER_OFF);
+		imx_gpc_pre_suspend(state);
+		imx6q_ccm_gpu_pre_suspend();
+		imx6q_anatop_pre_suspend();
+
+		suspend_in_iram(state, (unsigned long)iram_paddr,
+			(unsigned long)suspend_iram_base);
+
 		imx_smp_prepare();
-		imx_gpc_post_resume();
+
+		imx6q_anatop_post_resume();
+		imx6q_ccm_gpu_post_resume();
+		imx_gpc_post_resume(state);
+		udelay(10);
+		imx6q_ccm_post_resume();
+		break;
+	case PM_SUSPEND_STANDBY:
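+		/*
+		 * Standby uses STOP_POWER_OFF but, unlike mem, does not
+		 * tell the GPC to power off the ARM core; the DDR
+		 * self-refresh sequence is still run from IRAM.
+		 */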
+		if (imx_gpc_wake_irq_pending())
+			return 0;
+
+		imx6q_ccm_pre_suspend(state);
+		imx6q_set_lpm(STOP_POWER_OFF);
+		imx_gpc_pre_suspend(state);
+
+		suspend_in_iram(state, (unsigned long)iram_paddr,
+			(unsigned long)suspend_iram_base);
+
+		imx_smp_prepare();
+
+		imx_gpc_post_resume(state);
+		imx6q_ccm_post_resume();
 		break;
 	default:
 		return -EINVAL;
@@ -48,9 +127,14 @@
 	return 0;
 }
 
+static int imx6q_pm_valid(suspend_state_t state)
+{
+	return state == PM_SUSPEND_STANDBY || state == PM_SUSPEND_MEM;
+}
+
 static const struct platform_suspend_ops imx6q_pm_ops = {
 	.enter = imx6q_pm_enter,
-	.valid = suspend_valid_only_mem,
+	.valid = imx6q_pm_valid,
 };
 
 void __init imx6q_pm_init(void)
@@ -63,8 +147,29 @@
 	 * have to take care of restoring on its own.  So we save physical
 	 * address of the data structure used by l2x0 core to save registers,
 	 * and later restore the necessary ones in imx6q resume entry.
 	 */
 	phys_l2x0_saved_regs = __pa(&l2x0_saved_regs);
 
+	anatop_base = IMX_IO_ADDRESS(MX6Q_ANATOP_BASE_ADDR);
+
 	suspend_set_ops(&imx6q_pm_ops);
+
+	/* Move suspend routine into iRAM */
+	cpaddr = (unsigned long)iram_alloc(SZ_4K, &iram_paddr);
+	/*
+	 * Need to remap the area here since we want the memory
+	 * region to be executable.
+	 */
+	suspend_iram_base = __arm_ioremap(iram_paddr, SZ_4K,
+					  MT_MEMORY_NONCACHED);
+	pr_info("cpaddr = %x suspend_iram_base=%x\n",
+		(unsigned int)cpaddr, (unsigned int)suspend_iram_base);
+
+	/*
+	 * Need to run the suspend code from IRAM as the DDR needs
+	 * to be put into low power mode manually.
+	 */
+	memcpy((void *)cpaddr, mx6q_suspend, SZ_4K);
+
+	suspend_in_iram = (void *)suspend_iram_base;
 }
diff --git a/arch/arm/mach-imx/src.c b/arch/arm/mach-imx/src.c
index 9c6b01c..54cf7e1 100644
--- a/arch/arm/mach-imx/src.c
+++ b/arch/arm/mach-imx/src.c
@@ -29,7 +29,7 @@
 #define BP_SRC_SIMR_IPU1_MASK		2
 #define BP_SRC_SIMR_IPU2_MASK		4
 
-static void __iomem *src_base;
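+/* Not static: referenced by the low power suspend code in suspend-imx6q.S */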
+void __iomem *src_base;
 
 #ifndef CONFIG_SMP
 #define cpu_logical_map(cpu)		0
diff --git a/arch/arm/mach-imx/suspend-imx6q.S b/arch/arm/mach-imx/suspend-imx6q.S
new file mode 100644
index 0000000..11a7863
--- /dev/null
+++ b/arch/arm/mach-imx/suspend-imx6q.S
@@ -0,0 +1,695 @@
+/*
+ * Copyright (C) 2010-2011 Freescale Semiconductor, Inc. All Rights Reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+
+ * You should have received a copy of the GNU General Public License along
+ * with this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
+ */
+
+#include <linux/linkage.h>
+#include <mach/hardware.h>
+#include <asm/memory.h>
+#include <asm/hardware/cache-l2x0.h>
+
+#define MX6Q_PHYS_OFFSET         UL(0x10000000)
+
+#define ARM_CTRL_DCACHE		(1 << 2)
+#define ARM_CTRL_ICACHE		(1 << 12)
+#define ARM_AUXCR_L2EN		(1 << 1)
+#define TTRBIT_MASK 		0xffffc000
+#define TABLE_INDEX_MASK 	0xfff00000
+#define TABLE_ENTRY 		0x00000c02
+#define CACHE_DISABLE_MASK 	0xffffe7fb
+#define MMDC_MAPSR_OFFSET 	0x404
+#define MMDC_MAPSR_PSS 		(1 << 4)
+#define MMDC_MAPSR_PSD 		(1 << 0)
+#define IRAM_SUSPEND_SIZE 	(1 << 12)
+
+#define SRC_SCR_OFFSET	0x000
+#define SRC_SBMR_OFFSET	0x004
+#define SRC_SRSR_OFFSET	0x008
+#define SRC_SAIAR_OFFSET	0x00c
+#define SRC_SAIRAR_OFFSET	0x010
+#define SRC_SISR_OFFSET	0x014
+#define SRC_SIMR_OFFSET	0x018
+#define SRC_SBMR2_OFFSET	0x01c
+#define SRC_GPR1_OFFSET	0x020
+#define SRC_GPR2_OFFSET	0x024
+#define SRC_GPR3_OFFSET	0x028
+#define SRC_GPR4_OFFSET	0x02c
+#define SRC_GPR5_OFFSET	0x030
+#define SRC_GPR6_OFFSET	0x034
+#define SRC_GPR7_OFFSET	0x038
+#define SRC_GPR8_OFFSET	0x03c
+#define SRC_GPR9_OFFSET	0x040
+#define SRC_GPR10_OFFSET	0x044
+
+#define BP_SRC_SCR_CORE0_RST	13
+#define BP_SRC_SCR_CORES_DBG_RST 21
+#define BP_SRC_SCR_CORE1_ENABLE 22
+
+/*************************************************************
+mx6q_suspend:
+
+Suspend the processor (i.e. wait for interrupt) and put the DDR
+into self-refresh.  IRQs are already disabled.
+
+The code below handles both standby and dormant mode for the
+i.MX6Q, selected by the parameter passed in r0 (see the state
+definitions in include/linux/suspend.h):
+1 -> CPU enters stop mode;
+3 -> CPU enters dormant mode.
+r1: iram_paddr
+r2: suspend_iram_base
+*************************************************************/
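+
+/*
+ * The ddr_io_save/ddr_io_restore/ddr_io_set_lpm macros operate on the
+ * DDR pad control registers in the IOMUXC: r1 holds the IOMUXC base
+ * address and, for save/restore, r0 points into the save area used as
+ * a descending stack.
+ */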
+	.macro	ddr_io_save
+
+	ldr	r4, [r1, #0x5ac] /* DRAM_DQM0 */
+	ldr	r5, [r1, #0x5b4] /* DRAM_DQM1 */
+	ldr	r6, [r1, #0x528] /* DRAM_DQM2 */
+	ldr	r7, [r1, #0x520] /* DRAM_DQM3 */
+	stmfd	r0!, {r4-r7}
+
+	ldr	r4, [r1, #0x514] /* DRAM_DQM4 */
+	ldr	r5, [r1, #0x510] /* DRAM_DQM5 */
+	ldr	r6, [r1, #0x5bc] /* DRAM_DQM6 */
+	ldr	r7, [r1, #0x5c4] /* DRAM_DQM7 */
+	stmfd	r0!, {r4-r7}
+
+	ldr	r4, [r1, #0x56c] /* DRAM_CAS */
+	ldr	r5, [r1, #0x578] /* DRAM_RAS */
+	ldr	r6, [r1, #0x588] /* DRAM_SDCLK_0 */
+	ldr	r7, [r1, #0x594] /* DRAM_SDCLK_1 */
+	stmfd	r0!, {r4-r7}
+
+	ldr	r5, [r1, #0x750] /* DDRMODE_CTL */
+	ldr	r6, [r1, #0x774] /* DDRMODE */
+	stmfd	r0!, {r5-r6}
+
+	ldr	r4, [r1, #0x5a8] /* DRAM_SDQS0 */
+	ldr	r5, [r1, #0x5b0] /* DRAM_SDQS1 */
+	ldr	r6, [r1, #0x524] /* DRAM_SDQS2 */
+	ldr	r7, [r1, #0x51c] /* DRAM_SDQS3 */
+	stmfd	r0!, {r4-r7}
+
+	ldr	r4, [r1, #0x518] /* DRAM_SDQS4 */
+	ldr	r5, [r1, #0x50c] /* DRAM_SDQS5 */
+	ldr	r6, [r1, #0x5b8] /* DRAM_SDQS6 */
+	ldr	r7, [r1, #0x5c0] /* DRAM_SDQS7 */
+	stmfd	r0!, {r4-r7}
+
+	ldr	r4, [r1, #0x784] /* GPR_B0DS */
+	ldr	r5, [r1, #0x788] /* GPR_B1DS */
+	ldr	r6, [r1, #0x794] /* GPR_B2DS */
+	ldr	r7, [r1, #0x79c] /* GPR_B3DS */
+	stmfd	r0!, {r4-r7}
+
+	ldr	r4, [r1, #0x7a0] /* GPR_B4DS */
+	ldr	r5, [r1, #0x7a4] /* GPR_B5DS */
+	ldr	r6, [r1, #0x7a8] /* GPR_B6DS */
+	ldr	r7, [r1, #0x748] /* GPR_B7DS */
+	stmfd	r0!, {r4-r7}
+
+	ldr	r5, [r1, #0x74c] /* GPR_ADDS*/
+	ldr	r6, [r1, #0x59c] /* DRAM_SODT0*/
+	ldr	r7, [r1, #0x5a0] /* DRAM_SODT1*/
+	stmfd	r0!, {r5-r7}
+
+	.endm
+
+	.macro	ddr_io_restore
+
+	ldmea	r0!, {r4-r7}
+	str	r4, [r1, #0x5ac] /* DRAM_DQM0 */
+	str	r5, [r1, #0x5b4] /* DRAM_DQM1 */
+	str	r6, [r1, #0x528] /* DRAM_DQM2 */
+	str	r7, [r1, #0x520] /* DRAM_DQM3 */
+
+	ldmea	r0!, {r4-r7}
+	str	r4, [r1, #0x514] /* DRAM_DQM4 */
+	str	r5, [r1, #0x510] /* DRAM_DQM5 */
+	str	r6, [r1, #0x5bc] /* DRAM_DQM6 */
+	str	r7, [r1, #0x5c4] /* DRAM_DQM7 */
+
+	ldmea	r0!, {r4-r7}
+	str	r4, [r1, #0x56c] /* DRAM_CAS */
+	str	r5, [r1, #0x578] /* DRAM_RAS */
+	str	r6, [r1, #0x588] /* DRAM_SDCLK_0 */
+	str	r7, [r1, #0x594] /* DRAM_SDCLK_1 */
+
+	ldmea	r0!, {r5-r6}
+	str	r5, [r1, #0x750] /* DDRMODE_CTL */
+	str	r6, [r1, #0x774] /* DDRMODE */
+
+	ldmea	r0!, {r4-r7}
+	str	r4, [r1, #0x5a8] /* DRAM_SDQS0 */
+	str	r5, [r1, #0x5b0] /* DRAM_SDQS1 */
+	str	r6, [r1, #0x524] /* DRAM_SDQS2 */
+	str	r7, [r1, #0x51c] /* DRAM_SDQS3 */
+
+	ldmea	r0!, {r4-r7}
+	str	r4, [r1, #0x518] /* DRAM_SDQS4 */
+	str	r5, [r1, #0x50c] /* DRAM_SDQS5 */
+	str	r6, [r1, #0x5b8] /* DRAM_SDQS6 */
+	str	r7, [r1, #0x5c0] /* DRAM_SDQS7 */
+
+	ldmea	r0!, {r4-r7}
+	str	r4, [r1, #0x784] /* GPR_B0DS */
+	str	r5, [r1, #0x788] /* GPR_B1DS */
+	str	r6, [r1, #0x794] /* GPR_B2DS */
+	str	r7, [r1, #0x79c] /* GPR_B3DS */
+
+	ldmea	r0!, {r4-r7}
+	str	r4, [r1, #0x7a0] /* GPR_B4DS */
+	str	r5, [r1, #0x7a4] /* GPR_B5DS */
+	str	r6, [r1, #0x7a8] /* GPR_B6DS */
+	str	r7, [r1, #0x748] /* GPR_B7DS */
+
+	ldmea	r0!, {r5-r7}
+	str	r5, [r1, #0x74c] /* GPR_ADDS*/
+	str	r6, [r1, #0x59c] /* DRAM_SODT0*/
+	str	r7, [r1, #0x5a0] /* DRAM_SODT1*/
+
+	.endm
+
+	.macro	ddr_io_set_lpm
+
+	mov	r0, #0
+	str	r0, [r1, #0x5ac] /* DRAM_DQM0 */
+	str	r0, [r1, #0x5b4] /* DRAM_DQM1 */
+	str	r0, [r1, #0x528] /* DRAM_DQM2 */
+	str	r0, [r1, #0x520] /* DRAM_DQM3 */
+
+	str	r0, [r1, #0x514] /* DRAM_DQM4 */
+	str	r0, [r1, #0x510] /* DRAM_DQM5 */
+	str	r0, [r1, #0x5bc] /* DRAM_DQM6 */
+	str	r0, [r1, #0x5c4] /* DRAM_DQM7 */
+
+	str	r0, [r1, #0x56c] /* DRAM_CAS */
+	str	r0, [r1, #0x578] /* DRAM_RAS */
+	str	r0, [r1, #0x588] /* DRAM_SDCLK_0 */
+	str	r0, [r1, #0x594] /* DRAM_SDCLK_1 */
+
+	str	r0, [r1, #0x750] /* DDRMODE_CTL */
+	str	r0, [r1, #0x774] /* DDRMODE */
+
+	str	r0, [r1, #0x5a8] /* DRAM_SDQS0 */
+	str	r0, [r1, #0x5b0] /* DRAM_SDQS1 */
+	str	r0, [r1, #0x524] /* DRAM_SDQS2 */
+	str	r0, [r1, #0x51c] /* DRAM_SDQS3 */
+
+	str	r0, [r1, #0x518] /* DRAM_SDQS4 */
+	str	r0, [r1, #0x50c] /* DRAM_SDQS5 */
+	str	r0, [r1, #0x5b8] /* DRAM_SDQS6 */
+	str	r0, [r1, #0x5c0] /* DRAM_SDQS7 */
+
+	str	r0, [r1, #0x784] /* GPR_B0DS */
+	str	r0, [r1, #0x788] /* GPR_B1DS */
+	str	r0, [r1, #0x794] /* GPR_B2DS */
+	str	r0, [r1, #0x79c] /* GPR_B3DS */
+
+	str	r0, [r1, #0x7a0] /* GPR_B4DS */
+	str	r0, [r1, #0x7a4] /* GPR_B5DS */
+	str	r0, [r1, #0x7a8] /* GPR_B6DS */
+	str	r0, [r1, #0x748] /* GPR_B7DS */
+
+	str	r0, [r1, #0x74c] /* GPR_ADDS*/
+	str	r0, [r1, #0x59c] /* DRAM_SODT0*/
+	str	r0, [r1, #0x5a0] /* DRAM_SODT1*/
+
+	.endm
+
+ENTRY(mx6q_suspend)
+	stmfd	sp!, {r0-r12}     @ Save registers
+/*************************************************************
+suspend mode entry
+*************************************************************/
+
+	cmp	r0, #0x1
+	bne	dormant		/* dormant mode */
+
+	dsb
+	wfi
+
+	nop
+	nop
+	nop
+	nop
+	/*
+	 * Due to the L2 cache erratum (TKT065875) we need to wait at
+	 * least 170ns here.  Each IO read takes about 76ns, but to
+	 * keep the system stable the actual wait is about 380ns.
+	 */
+	ldr	r1, =src_base
+	ldr	r0, [r1]
+	ldr	r1, [r0]
+	ldr	r1, [r0, #0x4]
+	ldr	r1, [r0, #0x8]
+	ldr	r1, [r0, #0xc]
+	ldr	r1, [r0, #0x10]
+/***********************************************************
+wakeup from stop (standby) mode falls through to here
+************************************************************/
+	b 	out	/* exit standby */
+
+/************************************************************
+dormant mode entry: context is saved on a stack at the end of
+the IRAM suspend area; its physical address is kept in SRC_GPR2
+************************************************************/
+dormant:
+	mov 	r3, r1
+	mov 	r0, r1
+	add 	r0, r0, #IRAM_SUSPEND_SIZE /* 4K */
+	ldr 	r5, =src_base
+	ldr 	r4, [r5]
+	str 	r0, [r4, #SRC_GPR2_OFFSET] /* set src_gpr2 */
+/************************************************************
+registers and context saved below, in order:
+	ddr_iomux set
+	sp
+	spsr
+	lr
+	CPACR
+	TTBR0
+	TTBR1
+	TTBCR
+	DACR
+	PRRR
+	NMRR
+	ACTLR
+	Context ID
+	User r/w thread ID
+	Secure or NS VBAR
+	CPSR
+	SCTLR
+************************************************************/
+ddr_iomux_save:
+	/*
+	 * Save the MMDC iomux settings; the save stack grows down from
+	 * the end of the IRAM suspend area.
+	 */
+
+	mov	r0, r2 		/* get suspend_iram_base */
+	add	r0, r0, #IRAM_SUSPEND_SIZE	/* 4K */
+	ldr	r4, =iomux_base
+	ldr	r1, [r4]
+
+	ddr_io_save
+
+	mov	r4, sp			@ Store sp
+	mrs	r5, spsr		@ Store spsr
+	mov	r6, lr			@ Store lr
+	stmfd	r0!, {r4-r6}
+
+	/* c1 and c2 registers */
+	mrc	p15, 0, r4, c1, c0, 2	@ CPACR
+	mrc	p15, 0, r5, c2, c0, 0	@ TTBR0
+	mrc	p15, 0, r6, c2, c0, 1	@ TTBR1
+	mrc	p15, 0, r7, c2, c0, 2	@ TTBCR
+	stmfd	r0!, {r4-r7}
+
+	/* c3 and c10 registers */
+	mrc	p15, 0, r4, c3, c0, 0	@ DACR
+	mrc	p15, 0, r5, c10, c2, 0	@ PRRR
+	mrc	p15, 0, r6, c10, c2, 1	@ NMRR
+	mrc	p15, 0, r7, c1, c0, 1	@ ACTLR
+	stmfd	r0!,{r4-r7}
+
+	/* c12, c13 and CPSR registers */
+	mrc	p15, 0, r4, c13, c0, 1	@ Context ID
+	mrc	p15, 0, r5, c13, c0, 2	@ User r/w thread ID
+	mrc	p15, 0, r6, c12, c0, 0	@ Secure or NS VBAR
+	mrs	r7, cpsr		@ Store CPSR
+	stmfd	r0!, {r4-r7}
+
+	/* c1 control register */
+	mrc	p15, 0, r4, c1, c0, 0	@ SCTLR
+	stmfd	r0!, {r4}
+
+#ifdef CONFIG_CACHE_L2X0
+	ldr r4, =l2x0_base
+	ldr r2, [r4]
+
+	ldr	r4, [r2, #L2X0_CTRL]
+	ldr	r5, [r2, #L2X0_AUX_CTRL]
+	ldr	r6, [r2, #L2X0_TAG_LATENCY_CTRL]
+	ldr	r7, [r2, #L2X0_DATA_LATENCY_CTRL]
+	stmfd	r0!, {r4-r7}
+
+	ldr	r4, [r2, #L2X0_PREFETCH_CTRL]
+	ldr	r5, [r2, #L2X0_POWER_CTRL]
+	stmfd	r0!, {r4-r5}
+#endif
+	/*
+	 * Flush all data from the L1 data cache before disabling
+	 * SCTLR.C bit.
+	 */
+	push	{r0-r12, lr}
+	ldr	r0, =v7_flush_dcache_all
+	mov	lr, pc
+	mov	pc, r0
+	pop	{r0-r12, lr}
+
+	/*
+	 * Clear the SCTLR.C bit to prevent further data cache
+	 * allocation. Clearing SCTLR.C would make all the data accesses
+	 * strongly ordered and would not hit the cache.
+	 */
+	mrc	p15, 0, r0, c1, c0, 0
+	bic	r0, r0, #(1 << 2)	@ Disable the C bit
+	mcr	p15, 0, r0, c1, c0, 0
+	isb
+
+	/*
+	 * Invalidate L1 data cache. Even though only invalidate is
+	 * necessary exported flush API is used here. Doing clean
+	 * on already clean cache would be almost NOP.
+	 */
+	push	{r0-r12, lr}
+	ldr	r0, =v7_flush_dcache_all
+	mov	lr, pc
+	mov	pc, r0
+	pop	{r0-r12, lr}
+
+	/*
+	 * Execute an ISB instruction to ensure that all of the
+	 * CP15 register changes have been committed.
+	 */
+	isb
+
+	/*
+	 * Execute a barrier instruction to ensure that all cache,
+	 * TLB and branch predictor maintenance operations issued
+	 * by any CPU in the cluster have completed.
+	 */
+	dsb
+	dmb
+
+	/*
+	 * Clean the L2 cache so that all dirty data is written back to
+	 * DRAM and DRAM is consistent with the cache contents.
+	 */
+#ifdef CONFIG_CACHE_L2X0
+	/* Clean L2 cache here */
+	ldr	r0, =l2x0_base
+	ldr	r1, [r0]
+	/* Way mask 0xffff: clean all 16 ways */
+	mov	r0, #0x10000
+	sub	r0, r0, #0x1
+	/* 0x7BC is L2X0_CLEAN_WAY */
+	mov	r4, #0x700
+	orr	r4, #0xBC
+	str	r0, [r1, r4]
+
+wait:
+	ldr	r2, [r1, r4]
+	ands	r2, r2, r0
+	bne	wait
+l2x0_sync:
+	mov	r2, #0x0
+	/* 0x730 is L2X0_CACHE_SYNC */
+	mov	r4, #0x700
+	orr	r4, #0x30
+	str	r2, [r1, r4]
+sync:
+	ldr	r2, [r1, r4]
+	ands	r2, r2, #0x1
+	bne	sync
+#endif
+/****************************************************************
+put the DDR into self-refresh and set the DDR iomux pads to
+low power mode
+****************************************************************/
+	ldr	r0, =mmdc_base_array
+	ldr	r1, [r0]
+	ldr	r0, [r1, #MMDC_MAPSR_OFFSET]
+	bic	r0, #MMDC_MAPSR_PSD		/* clear PSD to enable power saving */
+	str	r0, [r1, #MMDC_MAPSR_OFFSET]
+refresh:
+	ldr	r0, [r1, #MMDC_MAPSR_OFFSET] 	/* MMDC_MAPSR */
+	and	r0, r0, #MMDC_MAPSR_PSS 	/* PSS bit */
+	cmp	r0, #0
+	beq	refresh
+
+	/* set mmdc iomux to low power mode */
+	ldr	r0, =iomux_base
+	ldr	r1, [r0]
+
+	ddr_io_set_lpm
+/****************************************************************
+save resume pointer into SRC_GPR1
+****************************************************************/
+	ldr	r0, =mx6q_suspend
+	ldr	r1, =resume
+	sub	r1, r1, r0
+	add	r3, r3, r1
+	ldr	r0, =src_base
+	ldr	r1, [r0]
+	str	r3, [r1, #SRC_GPR1_OFFSET]
+/****************************************************************
+execute a wfi instruction to let the SoC enter stop mode.
+****************************************************************/
+	wfi
+
+	nop
+	nop
+	nop
+	nop
+
+/****************************************************************
+if we get here, a wakeup irq was already pending and the system
+must resume immediately.
+****************************************************************/
+	mov	r0, r2 		/* get suspend_iram_base */
+	add	r0, r0, #IRAM_SUSPEND_SIZE	/* 4K */
+
+	ldr	r3, =iomux_base
+	ldr	r1, [r3]
+
+	ddr_io_restore
+
+	mrc	p15, 0, r1, c1, c0, 0
+	orr	r1, r1, #(1 << 2)	@ Enable the C bit
+	mcr	p15, 0, r1, c1, c0, 0
+
+	b 	out	/* exit standby */
+
+/****************************************************************
+when the SoC exits stop mode the ARM core restarts here, still
+running with the MMU off.
+****************************************************************/
+resume:
+	ldr	r0, =MX6Q_SRC_BASE_ADDR
+	/*
+	 * Due to the L2 cache erratum (TKT065875) we need to wait at
+	 * least 170ns here.  Each IO read takes about 76ns, but to
+	 * keep the system stable the actual wait is about 380ns.
+	 */
+	ldr	r1, [r0]
+	ldr	r1, [r0, #0x4]
+	ldr	r1, [r0, #0x8]
+	ldr	r1, [r0, #0xc]
+	ldr	r1, [r0, #0x10]
+	mov	r1, #0x0
+	str	r1, [r0, #SRC_GPR1_OFFSET] /* clear SRC_GPR1 */
+	ldr	r0, [r0, #SRC_GPR2_OFFSET]
+
+	ldr	r1, =MX6Q_IOMUXC_BASE_ADDR
+	ddr_io_restore
+
+	/* Restore cp15 registers */
+	ldmea	r0!, {r4-r6}
+	mov	sp, r4
+	msr	spsr_cxsf, r5		@ Restore spsr
+	mov	lr, r6			@ Restore lr
+
+	/* c1 and c2 registers */
+	ldmea	r0!, {r4-r7}
+	mcr	p15, 0, r4, c1, c0, 2	@ CPACR
+	mcr	p15, 0, r5, c2, c0, 0	@ TTBR0
+	mcr	p15, 0, r6, c2, c0, 1	@ TTBR1
+	mcr	p15, 0, r7, c2, c0, 2	@ TTBCR
+
+	/* c3 and c10 registers */
+	ldmea	r0!,{r4-r7}
+	mcr	p15, 0, r4, c3, c0, 0	@ DACR
+	mcr	p15, 0, r5, c10, c2, 0	@ PRRR
+	mcr	p15, 0, r6, c10, c2, 1	@ NMRR
+	mcr	p15, 0, r7, c1, c0, 1	@ ACTLR
+
+	/* c12, c13 and CPSR registers */
+	ldmea	r0!,{r4-r7}
+	mcr	p15, 0, r4, c13, c0, 1	@ Context ID
+	mcr	p15, 0, r5, c13, c0, 2	@ User r/w thread ID
+	mcr	p15, 0, r6, c12, c0, 0	@ Secure or NS VBAR
+	msr	cpsr, r7		@ Restore CPSR
+
+	/*
+	 * Enable the MMU here.  The page table entry needs to be
+	 * altered to create a temporary 1:1 mapping; the original
+	 * entry is restored once the MMU is enabled.
+	 */
+	mrc	p15, 0, r7, c2, c0, 2	@ Read TTBCR
+	and	r7, #0x7		@ Extract N (0:2) to decide
+	cmp	r7, #0x0		@ TTBR0/TTBR1
+	beq	use_ttbr0
+ttbr_error:
+	b	ttbr_error		@ Only N = 0 supported
+use_ttbr0:
+	mrc	p15, 0, r2, c2, c0, 0	@ Read TTBR0
+	ldr	r5, =TTRBIT_MASK
+	and	r2, r5
+	mov	r4, pc
+	ldr	r5, =TABLE_INDEX_MASK
+	and	r4, r5			@ r4 = 31 to 20 bits of pc
+	ldr	r1, =TABLE_ENTRY
+	add	r1, r1, r4		@ r1 has value of table entry
+	lsr	r4, #18			@ Address of table entry
+	add	r2, r4			@ r2 - location to be modified
+
+	/* Storing previous entry of location being modified */
+	ldr	r4, [r2]
+	mov	r9, r4
+	str	r1, [r2]
+
+	/*
+	 * Storing address of entry being modified
+	 * It will be restored after enabling MMU
+	 */
+	mov 	r10, r2
+
+	mov	r1, #0
+	mcr	p15, 0, r1, c7, c5, 4	@ Flush prefetch buffer
+	mcr	p15, 0, r1, c7, c5, 6	@ Invalidate BTB
+	mcr	p15, 0, r1, c8, c5, 0	@ Invalidate ITLB
+	mcr	p15, 0, r1, c8, c6, 0	@ Invalidate DTLB
+
+	/*
+	 * Restore the control register, but don't enable the data
+	 * caches here; they are enabled after the MMU table entry is
+	 * restored.
+	 */
+	ldmea	r0!, {r4}
+	mov 	r11, r4
+	ldr	r2, =CACHE_DISABLE_MASK
+	and	r4, r4, r2
+	mcr	p15, 0, r4, c1, c0, 0
+	isb
+	dsb
+	ldr	r1, =mmu_on_label
+	bx	r1
+mmu_on_label:
+	mov 	r8, lr
+	/* Set up the per-CPU stacks */
+
+	/* protect r0 */
+	push	{r0}
+	bl	cpu_init
+	pop	{r0}
+
+#ifdef CONFIG_CACHE_L2X0
+	ldr r4, =l2x0_base
+	ldr r2, [r4]
+
+	ldmea	r0!, {r4-r7}
+	/* L2 will be enabled after L1 is enabled */
+	mov 	r4, #0x0
+	str	r4, [r2, #L2X0_CTRL]
+	str	r5, [r2, #L2X0_AUX_CTRL]
+	str	r6, [r2, #L2X0_TAG_LATENCY_CTRL]
+	str	r7, [r2, #L2X0_DATA_LATENCY_CTRL]
+
+	ldmea	r0!, {r4-r5}
+	str	r4, [r2, #L2X0_PREFETCH_CTRL]
+	str	r5, [r2, #L2X0_POWER_CTRL]
+#endif
+	/*
+	 * Restore the MMU table entry that was modified for
+	 * enabling MMU.
+	 */
+	ldr	r4, =PAGE_OFFSET
+	ldr	r5, =MX6Q_PHYS_OFFSET
+	sub	r4, r4, r5
+	add	r4, r4, r10
+	str	r9, [r4]
+
+	mov	r0, #0
+	mcr	p15, 0, r0, c7, c1, 6	@ flush TLB and issue barriers
+	mcr	p15, 0, r0, c7, c5, 4	@ Flush prefetch buffer
+	mcr	p15, 0, r0, c7, c5, 6	@ Invalidate BTB
+	mcr	p15, 0, r0, c8, c5, 0	@ Invalidate ITLB
+	mcr	p15, 0, r0, c8, c6, 0	@ Invalidate DTLB
+	dsb
+	isb
+
+/******************************************************************
+invalidate the L1 D-cache (clobbers r0-r4, r6, r7)
+******************************************************************/
+	mov     r0, #0
+	mcr     p15, 2, r0, c0, c0, 0
+	mrc     p15, 1, r0, c0, c0, 0
+
+	ldr     r1, =0x7fff
+	and     r2, r1, r0, lsr #13
+
+	ldr     r1, =0x3ff
+
+	and     r3, r1, r0, lsr #3	@ NumWays - 1
+	add     r2, r2, #1		@ NumSets
+
+	and     r0, r0, #0x7
+	add     r0, r0, #4		@ SetShift
+
+	clz     r1, r3			@ WayShift
+	add     r4, r3, #1		@ NumWays
+1:
+	sub     r2, r2, #1		@ NumSets--
+	mov     r3, r4			@ Temp = NumWays
+2:
+	subs    r3, r3, #1		@ Temp--
+	mov     r7, r3, lsl r1
+	mov     r6, r2, lsl r0
+	orr     r7, r7, r6
+	mcr     p15, 0, r7, c7, c6, 2
+	bgt     2b
+	cmp     r2, #0
+	bgt     1b
+	dsb
+	isb
+
+/************************************************************
+restore the control register to re-enable the caches
+************************************************************/
+	mov	r0, r11
+	mcr	p15, 0, r0, c1, c0, 0	@ with caches enabled.
+	dsb
+	isb
+
+#ifdef CONFIG_CACHE_L2X0
+	/* Enable L2 cache here */
+	ldr r4, =l2x0_base
+	ldr r2, [r4]
+	mov r4, #0x1
+	str	r4, [r2, #L2X0_CTRL]
+#endif
+/***********************************************************
+return to the caller (imx6q_pm_enter) after dormant resume
+***********************************************************/
+	mov	lr, r8
+	ldmfd	sp!, {r0-r12}
+	mov	pc, lr
+/************************************************
+return to the caller (imx6q_pm_enter) after standby
+*************************************************/
+out:
+	ldmfd	sp!, {r0-r12}
+	mov	pc, lr
+
+	.type	mx6q_do_suspend, #object
+ENTRY(mx6q_do_suspend)
+	.word	mx6q_suspend
+	.size	mx6q_suspend, . - mx6q_suspend
diff --git a/arch/arm/mm/cache-l2x0.c b/arch/arm/mm/cache-l2x0.c
index b1e192b..9c92706 100644
--- a/arch/arm/mm/cache-l2x0.c
+++ b/arch/arm/mm/cache-l2x0.c
@@ -28,7 +28,7 @@
 
 #define CACHE_LINE_SIZE		32
 
-static void __iomem *l2x0_base;
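+/* Not static: the imx6q suspend code saves and restores the L2X0 registers */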
+void __iomem *l2x0_base;
 static DEFINE_RAW_SPINLOCK(l2x0_lock);
 static uint32_t l2x0_way_mask;	/* Bitmask of active ways */
 static uint32_t l2x0_size;
diff --git a/arch/arm/plat-mxc/include/mach/common.h b/arch/arm/plat-mxc/include/mach/common.h
index 4c28260..d32afc9 100644
--- a/arch/arm/plat-mxc/include/mach/common.h
+++ b/arch/arm/plat-mxc/include/mach/common.h
@@ -11,6 +11,8 @@
 #ifndef __ASM_ARCH_MXC_COMMON_H__
 #define __ASM_ARCH_MXC_COMMON_H__
 
+#include <linux/suspend.h>
+
 struct platform_device;
 struct clk;
 enum mxc_cpu_pwr_mode;
@@ -82,6 +84,7 @@
 	WAIT_UNCLOCKED_POWER_OFF,	/* WAIT + SRPG */
 	STOP_POWER_ON,		/* just STOP */
 	STOP_POWER_OFF,		/* STOP + SRPG */
+	ARM_POWER_OFF,          /* STOP + SRPG + ARM power off */
 };
 
 extern void mx5_cpu_lp_set(enum mxc_cpu_pwr_mode mode);
@@ -126,8 +129,9 @@
 extern void imx_set_cpu_jump(int cpu, void *jump_addr);
 extern void imx_src_init(void);
 extern void imx_gpc_init(void);
-extern void imx_gpc_pre_suspend(void);
-extern void imx_gpc_post_resume(void);
+extern bool imx_gpc_wake_irq_pending(void);
+extern void imx_gpc_pre_suspend(suspend_state_t state);
+extern void imx_gpc_post_resume(suspend_state_t state);
 extern void imx51_babbage_common_init(void);
 extern void imx53_ard_common_init(void);
 extern void imx53_evk_common_init(void);
@@ -136,4 +140,8 @@
 extern int imx6q_set_lpm(enum mxc_cpu_pwr_mode mode);
 extern void imx6q_pm_init(void);
 extern void imx6q_clock_map_io(void);
+extern void imx6q_ccm_pre_suspend(suspend_state_t state);
+extern void imx6q_ccm_gpu_pre_suspend(void);
+extern void imx6q_ccm_gpu_post_resume(void);
+extern void imx6q_ccm_post_resume(void);
 #endif
diff --git a/arch/arm/plat-mxc/include/mach/mx6q.h b/arch/arm/plat-mxc/include/mach/mx6q.h
index 2bd3ba2..596c10d 100644
--- a/arch/arm/plat-mxc/include/mach/mx6q.h
+++ b/arch/arm/plat-mxc/include/mach/mx6q.h
@@ -41,4 +41,6 @@
 #define MX6Q_IPU1_BASE_ADDR         	0x02400000
 #define MX6Q_IPU2_BASE_ADDR         	0x02800000
 
+#define MX6Q_SRC_BASE_ADDR		0x020d8000
+
 #endif	/* __MACH_MX6Q_H__ */