Diffstat (limited to 'arch/powerpc/platforms')
-rw-r--r--  arch/powerpc/platforms/40x/ppc40x_simple.c | 2
-rw-r--r--  arch/powerpc/platforms/44x/currituck.c | 10
-rw-r--r--  arch/powerpc/platforms/512x/Kconfig | 1
-rw-r--r--  arch/powerpc/platforms/512x/clock.c | 6
-rw-r--r--  arch/powerpc/platforms/512x/mpc5121_generic.c | 2
-rw-r--r--  arch/powerpc/platforms/512x/mpc512x_shared.c | 6
-rw-r--r--  arch/powerpc/platforms/52xx/lite5200.c | 2
-rw-r--r--  arch/powerpc/platforms/52xx/media5200.c | 2
-rw-r--r--  arch/powerpc/platforms/52xx/mpc5200_simple.c | 1
-rw-r--r--  arch/powerpc/platforms/52xx/mpc52xx_lpbfifo.c | 35
-rw-r--r--  arch/powerpc/platforms/83xx/mpc837x_rdb.c | 2
-rw-r--r--  arch/powerpc/platforms/85xx/Kconfig | 21
-rw-r--r--  arch/powerpc/platforms/85xx/Makefile | 2
-rw-r--r--  arch/powerpc/platforms/85xx/common.c | 10
-rw-r--r--  arch/powerpc/platforms/85xx/corenet_ds.c | 38
-rw-r--r--  arch/powerpc/platforms/85xx/ge_imp3a.c | 62
-rw-r--r--  arch/powerpc/platforms/85xx/mpc8536_ds.c | 36
-rw-r--r--  arch/powerpc/platforms/85xx/mpc85xx_ads.c | 11
-rw-r--r--  arch/powerpc/platforms/85xx/mpc85xx_cds.c | 44
-rw-r--r--  arch/powerpc/platforms/85xx/mpc85xx_ds.c | 15
-rw-r--r--  arch/powerpc/platforms/85xx/mpc85xx_mds.c | 40
-rw-r--r--  arch/powerpc/platforms/85xx/mpc85xx_rdb.c | 30
-rw-r--r--  arch/powerpc/platforms/85xx/p1010rdb.c | 14
-rw-r--r--  arch/powerpc/platforms/85xx/p1022_ds.c | 36
-rw-r--r--  arch/powerpc/platforms/85xx/p1022_rdk.c | 167
-rw-r--r--  arch/powerpc/platforms/85xx/p1023_rds.c | 9
-rw-r--r--  arch/powerpc/platforms/85xx/p2041_rdb.c | 2
-rw-r--r--  arch/powerpc/platforms/85xx/p3041_ds.c | 2
-rw-r--r--  arch/powerpc/platforms/85xx/p4080_ds.c | 2
-rw-r--r--  arch/powerpc/platforms/85xx/p5020_ds.c | 2
-rw-r--r--  arch/powerpc/platforms/85xx/p5040_ds.c | 89
-rw-r--r--  arch/powerpc/platforms/85xx/qemu_e500.c | 5
-rw-r--r--  arch/powerpc/platforms/85xx/sbc8548.c | 21
-rw-r--r--  arch/powerpc/platforms/85xx/smp.c | 220
-rw-r--r--  arch/powerpc/platforms/85xx/socrates.c | 11
-rw-r--r--  arch/powerpc/platforms/85xx/stx_gp3.c | 13
-rw-r--r--  arch/powerpc/platforms/85xx/tqm85xx.c | 23
-rw-r--r--  arch/powerpc/platforms/85xx/xes_mpc85xx.c | 56
-rw-r--r--  arch/powerpc/platforms/86xx/gef_ppc9a.c | 12
-rw-r--r--  arch/powerpc/platforms/86xx/gef_sbc310.c | 13
-rw-r--r--  arch/powerpc/platforms/86xx/gef_sbc610.c | 12
-rw-r--r--  arch/powerpc/platforms/86xx/mpc8610_hpcd.c | 21
-rw-r--r--  arch/powerpc/platforms/86xx/mpc86xx_hpcn.c | 42
-rw-r--r--  arch/powerpc/platforms/86xx/sbc8641d.c | 14
-rw-r--r--  arch/powerpc/platforms/cell/beat.c | 4
-rw-r--r--  arch/powerpc/platforms/cell/beat.h | 2
-rw-r--r--  arch/powerpc/platforms/cell/beat_htab.c | 45
-rw-r--r--  arch/powerpc/platforms/pasemi/iommu.c | 6
-rw-r--r--  arch/powerpc/platforms/powernv/pci-ioda.c | 695
-rw-r--r--  arch/powerpc/platforms/powernv/pci-p5ioc2.c | 1
-rw-r--r--  arch/powerpc/platforms/powernv/pci.c | 7
-rw-r--r--  arch/powerpc/platforms/powernv/pci.h | 21
-rw-r--r--  arch/powerpc/platforms/ps3/htab.c | 22
-rw-r--r--  arch/powerpc/platforms/ps3/setup.c | 10
-rw-r--r--  arch/powerpc/platforms/pseries/Makefile | 5
-rw-r--r--  arch/powerpc/platforms/pseries/eeh.c | 543
-rw-r--r--  arch/powerpc/platforms/pseries/eeh_cache.c | 59
-rw-r--r--  arch/powerpc/platforms/pseries/eeh_dev.c | 14
-rw-r--r--  arch/powerpc/platforms/pseries/eeh_driver.c | 310
-rw-r--r--  arch/powerpc/platforms/pseries/eeh_event.c | 54
-rw-r--r--  arch/powerpc/platforms/pseries/eeh_pe.c | 652
-rw-r--r--  arch/powerpc/platforms/pseries/eeh_pseries.c | 247
-rw-r--r--  arch/powerpc/platforms/pseries/eeh_sysfs.c | 9
-rw-r--r--  arch/powerpc/platforms/pseries/iommu.c | 12
-rw-r--r--  arch/powerpc/platforms/pseries/lpar.c | 77
-rw-r--r--  arch/powerpc/platforms/pseries/msi.c | 26
-rw-r--r--  arch/powerpc/platforms/pseries/pci.c | 2
-rw-r--r--  arch/powerpc/platforms/pseries/pci_dlpar.c | 32
-rw-r--r--  arch/powerpc/platforms/pseries/setup.c | 22
69 files changed, 2190 insertions(+), 1851 deletions(-)
diff --git a/arch/powerpc/platforms/40x/ppc40x_simple.c b/arch/powerpc/platforms/40x/ppc40x_simple.c
index 97612068fae3..969dddcf3320 100644
--- a/arch/powerpc/platforms/40x/ppc40x_simple.c
+++ b/arch/powerpc/platforms/40x/ppc40x_simple.c
@@ -50,7 +50,7 @@ machine_device_initcall(ppc40x_simple, ppc40x_device_probe);
* Again, if your board needs to do things differently then create a
* board.c file for it rather than adding it to this list.
*/
-static const char *board[] __initdata = {
+static const char * const board[] __initconst = {
"amcc,acadia",
"amcc,haleakala",
"amcc,kilauea",
diff --git a/arch/powerpc/platforms/44x/currituck.c b/arch/powerpc/platforms/44x/currituck.c
index 9f6c33d63a42..6bd89a0e0dea 100644
--- a/arch/powerpc/platforms/44x/currituck.c
+++ b/arch/powerpc/platforms/44x/currituck.c
@@ -21,7 +21,6 @@
*/
#include <linux/init.h>
-#include <linux/memblock.h>
#include <linux/of.h>
#include <linux/of_platform.h>
#include <linux/rtc.h>
@@ -159,13 +158,8 @@ static void __init ppc47x_setup_arch(void)
/* No need to check the DMA config as we /know/ our windows are all of
* RAM. Lets hope that doesn't change */
-#ifdef CONFIG_SWIOTLB
- if ((memblock_end_of_DRAM() - 1) > 0xffffffff) {
- ppc_swiotlb_enable = 1;
- set_pci_dma_ops(&swiotlb_dma_ops);
- ppc_md.pci_dma_dev_setup = pci_dma_dev_setup_swiotlb;
- }
-#endif
+ swiotlb_detect_4g();
+
ppc47x_smp_init();
}
diff --git a/arch/powerpc/platforms/512x/Kconfig b/arch/powerpc/platforms/512x/Kconfig
index c16999802ecf..b62508b113db 100644
--- a/arch/powerpc/platforms/512x/Kconfig
+++ b/arch/powerpc/platforms/512x/Kconfig
@@ -2,6 +2,7 @@ config PPC_MPC512x
bool "512x-based boards"
depends on 6xx
select FSL_SOC
+ select FB_FSL_DIU
select IPIC
select PPC_CLOCK
select PPC_PCI_CHOICE
diff --git a/arch/powerpc/platforms/512x/clock.c b/arch/powerpc/platforms/512x/clock.c
index 1d8700ff60b0..9f771e05457c 100644
--- a/arch/powerpc/platforms/512x/clock.c
+++ b/arch/powerpc/platforms/512x/clock.c
@@ -54,14 +54,16 @@ static DEFINE_MUTEX(clocks_mutex);
static struct clk *mpc5121_clk_get(struct device *dev, const char *id)
{
struct clk *p, *clk = ERR_PTR(-ENOENT);
- int dev_match = 0;
- int id_match = 0;
+ int dev_match;
+ int id_match;
if (dev == NULL || id == NULL)
return clk;
mutex_lock(&clocks_mutex);
list_for_each_entry(p, &clocks, node) {
+ dev_match = id_match = 0;
+
if (dev == p->dev)
dev_match++;
if (strcmp(id, p->name) == 0)
diff --git a/arch/powerpc/platforms/512x/mpc5121_generic.c b/arch/powerpc/platforms/512x/mpc5121_generic.c
index 926731f1ff01..ca1ca6669990 100644
--- a/arch/powerpc/platforms/512x/mpc5121_generic.c
+++ b/arch/powerpc/platforms/512x/mpc5121_generic.c
@@ -26,7 +26,7 @@
/*
* list of supported boards
*/
-static const char *board[] __initdata = {
+static const char * const board[] __initconst = {
"prt,prtlvt",
NULL
};
diff --git a/arch/powerpc/platforms/512x/mpc512x_shared.c b/arch/powerpc/platforms/512x/mpc512x_shared.c
index cfe958e94e1e..1650e090ef3a 100644
--- a/arch/powerpc/platforms/512x/mpc512x_shared.c
+++ b/arch/powerpc/platforms/512x/mpc512x_shared.c
@@ -191,8 +191,6 @@ mpc512x_valid_monitor_port(enum fsl_diu_monitor_port port)
static struct fsl_diu_shared_fb __attribute__ ((__aligned__(8))) diu_shared_fb;
-#if defined(CONFIG_FB_FSL_DIU) || \
- defined(CONFIG_FB_FSL_DIU_MODULE)
static inline void mpc512x_free_bootmem(struct page *page)
{
__ClearPageReserved(page);
@@ -220,7 +218,6 @@ void mpc512x_release_bootmem(void)
}
diu_ops.release_bootmem = NULL;
}
-#endif
/*
* Check if DIU was pre-initialized. If so, perform steps
@@ -323,15 +320,12 @@ void __init mpc512x_setup_diu(void)
}
}
-#if defined(CONFIG_FB_FSL_DIU) || \
- defined(CONFIG_FB_FSL_DIU_MODULE)
diu_ops.get_pixel_format = mpc512x_get_pixel_format;
diu_ops.set_gamma_table = mpc512x_set_gamma_table;
diu_ops.set_monitor_port = mpc512x_set_monitor_port;
diu_ops.set_pixel_clock = mpc512x_set_pixel_clock;
diu_ops.valid_monitor_port = mpc512x_valid_monitor_port;
diu_ops.release_bootmem = mpc512x_release_bootmem;
-#endif
}
void __init mpc512x_init_IRQ(void)
diff --git a/arch/powerpc/platforms/52xx/lite5200.c b/arch/powerpc/platforms/52xx/lite5200.c
index 01ffa64d2aa7..448d862bcf3d 100644
--- a/arch/powerpc/platforms/52xx/lite5200.c
+++ b/arch/powerpc/platforms/52xx/lite5200.c
@@ -172,7 +172,7 @@ static void __init lite5200_setup_arch(void)
mpc52xx_setup_pci();
}
-static const char *board[] __initdata = {
+static const char * const board[] __initconst = {
"fsl,lite5200",
"fsl,lite5200b",
NULL,
diff --git a/arch/powerpc/platforms/52xx/media5200.c b/arch/powerpc/platforms/52xx/media5200.c
index 17d91b7da315..070d315dd6cd 100644
--- a/arch/powerpc/platforms/52xx/media5200.c
+++ b/arch/powerpc/platforms/52xx/media5200.c
@@ -232,7 +232,7 @@ static void __init media5200_setup_arch(void)
}
/* list of the supported boards */
-static const char *board[] __initdata = {
+static const char * const board[] __initconst = {
"fsl,media5200",
NULL
};
diff --git a/arch/powerpc/platforms/52xx/mpc5200_simple.c b/arch/powerpc/platforms/52xx/mpc5200_simple.c
index c0aa04068d69..9cf36020cf0d 100644
--- a/arch/powerpc/platforms/52xx/mpc5200_simple.c
+++ b/arch/powerpc/platforms/52xx/mpc5200_simple.c
@@ -52,6 +52,7 @@ static void __init mpc5200_simple_setup_arch(void)
static const char *board[] __initdata = {
"anonymous,a4m072",
"anon,charon",
+ "ifm,o2d",
"intercontrol,digsy-mtc",
"manroland,mucmc52",
"manroland,uc101",
diff --git a/arch/powerpc/platforms/52xx/mpc52xx_lpbfifo.c b/arch/powerpc/platforms/52xx/mpc52xx_lpbfifo.c
index d61fb1c0c1a0..2351f9e0fb6f 100644
--- a/arch/powerpc/platforms/52xx/mpc52xx_lpbfifo.c
+++ b/arch/powerpc/platforms/52xx/mpc52xx_lpbfifo.c
@@ -170,7 +170,8 @@ static void mpc52xx_lpbfifo_kick(struct mpc52xx_lpbfifo_request *req)
out_be32(lpbfifo.regs + LPBFIFO_REG_CONTROL, bit_fields);
/* Kick it off */
- out_8(lpbfifo.regs + LPBFIFO_REG_PACKET_SIZE, 0x01);
+ if (!lpbfifo.req->defer_xfer_start)
+ out_8(lpbfifo.regs + LPBFIFO_REG_PACKET_SIZE, 0x01);
if (dma)
bcom_enable(lpbfifo.bcom_cur_task);
}
@@ -421,6 +422,38 @@ int mpc52xx_lpbfifo_submit(struct mpc52xx_lpbfifo_request *req)
}
EXPORT_SYMBOL(mpc52xx_lpbfifo_submit);
+int mpc52xx_lpbfifo_start_xfer(struct mpc52xx_lpbfifo_request *req)
+{
+ unsigned long flags;
+
+ if (!lpbfifo.regs)
+ return -ENODEV;
+
+ spin_lock_irqsave(&lpbfifo.lock, flags);
+
+ /*
+ * If the req pointer is already set and a transfer was
+ * started on submit, then this transfer is in progress
+ */
+ if (lpbfifo.req && !lpbfifo.req->defer_xfer_start) {
+ spin_unlock_irqrestore(&lpbfifo.lock, flags);
+ return -EBUSY;
+ }
+
+ /*
+ * If the req was previously submitted but not
+ * started, start it now
+ */
+ if (lpbfifo.req && lpbfifo.req == req &&
+ lpbfifo.req->defer_xfer_start) {
+ out_8(lpbfifo.regs + LPBFIFO_REG_PACKET_SIZE, 0x01);
+ }
+
+ spin_unlock_irqrestore(&lpbfifo.lock, flags);
+ return 0;
+}
+EXPORT_SYMBOL(mpc52xx_lpbfifo_start_xfer);
+
void mpc52xx_lpbfifo_abort(struct mpc52xx_lpbfifo_request *req)
{
unsigned long flags;
diff --git a/arch/powerpc/platforms/83xx/mpc837x_rdb.c b/arch/powerpc/platforms/83xx/mpc837x_rdb.c
index 16c9c9cbbb7f..eca1f0960fff 100644
--- a/arch/powerpc/platforms/83xx/mpc837x_rdb.c
+++ b/arch/powerpc/platforms/83xx/mpc837x_rdb.c
@@ -60,7 +60,7 @@ static void __init mpc837x_rdb_setup_arch(void)
machine_device_initcall(mpc837x_rdb, mpc83xx_declare_of_platform_devices);
-static const char *board[] __initdata = {
+static const char * const board[] __initconst = {
"fsl,mpc8377rdb",
"fsl,mpc8378rdb",
"fsl,mpc8379rdb",
diff --git a/arch/powerpc/platforms/85xx/Kconfig b/arch/powerpc/platforms/85xx/Kconfig
index 159c01e91463..02d02a09942d 100644
--- a/arch/powerpc/platforms/85xx/Kconfig
+++ b/arch/powerpc/platforms/85xx/Kconfig
@@ -104,6 +104,13 @@ config P1022_DS
help
This option enables support for the Freescale P1022DS reference board.
+config P1022_RDK
+ bool "Freescale / iVeia P1022 RDK"
+ select DEFAULT_UIMAGE
+ help
+ This option enables support for the Freescale / iVeia P1022RDK
+ reference board.
+
config P1023_RDS
bool "Freescale P1023 RDS"
select DEFAULT_UIMAGE
@@ -254,6 +261,20 @@ config P5020_DS
help
This option enables support for the P5020 DS board
+config P5040_DS
+ bool "Freescale P5040 DS"
+ select DEFAULT_UIMAGE
+ select E500
+ select PPC_E500MC
+ select PHYS_64BIT
+ select SWIOTLB
+ select ARCH_REQUIRE_GPIOLIB
+ select GPIO_MPC8XXX
+ select HAS_RAPIDIO
+ select PPC_EPAPR_HV_PIC
+ help
+ This option enables support for the P5040 DS board
+
config PPC_QEMU_E500
bool "QEMU generic e500 platform"
depends on EXPERIMENTAL
diff --git a/arch/powerpc/platforms/85xx/Makefile b/arch/powerpc/platforms/85xx/Makefile
index 3dfe81175036..76f679cb04a0 100644
--- a/arch/powerpc/platforms/85xx/Makefile
+++ b/arch/powerpc/platforms/85xx/Makefile
@@ -15,11 +15,13 @@ obj-$(CONFIG_MPC85xx_MDS) += mpc85xx_mds.o
obj-$(CONFIG_MPC85xx_RDB) += mpc85xx_rdb.o
obj-$(CONFIG_P1010_RDB) += p1010rdb.o
obj-$(CONFIG_P1022_DS) += p1022_ds.o
+obj-$(CONFIG_P1022_RDK) += p1022_rdk.o
obj-$(CONFIG_P1023_RDS) += p1023_rds.o
obj-$(CONFIG_P2041_RDB) += p2041_rdb.o corenet_ds.o
obj-$(CONFIG_P3041_DS) += p3041_ds.o corenet_ds.o
obj-$(CONFIG_P4080_DS) += p4080_ds.o corenet_ds.o
obj-$(CONFIG_P5020_DS) += p5020_ds.o corenet_ds.o
+obj-$(CONFIG_P5040_DS) += p5040_ds.o corenet_ds.o
obj-$(CONFIG_STX_GP3) += stx_gp3.o
obj-$(CONFIG_TQM85xx) += tqm85xx.o
obj-$(CONFIG_SBC8548) += sbc8548.o
diff --git a/arch/powerpc/platforms/85xx/common.c b/arch/powerpc/platforms/85xx/common.c
index 67dac22b4363..d0861a0d8360 100644
--- a/arch/powerpc/platforms/85xx/common.c
+++ b/arch/powerpc/platforms/85xx/common.c
@@ -27,6 +27,16 @@ static struct of_device_id __initdata mpc85xx_common_ids[] = {
{ .compatible = "fsl,mpc8548-guts", },
/* Probably unnecessary? */
{ .compatible = "gpio-leds", },
+ /* For all PCI controllers */
+ { .compatible = "fsl,mpc8540-pci", },
+ { .compatible = "fsl,mpc8548-pcie", },
+ { .compatible = "fsl,p1022-pcie", },
+ { .compatible = "fsl,p1010-pcie", },
+ { .compatible = "fsl,p1023-pcie", },
+ { .compatible = "fsl,p4080-pcie", },
+ { .compatible = "fsl,qoriq-pcie-v2.4", },
+ { .compatible = "fsl,qoriq-pcie-v2.3", },
+ { .compatible = "fsl,qoriq-pcie-v2.2", },
{},
};
diff --git a/arch/powerpc/platforms/85xx/corenet_ds.c b/arch/powerpc/platforms/85xx/corenet_ds.c
index 925b02874233..ed69c9250717 100644
--- a/arch/powerpc/platforms/85xx/corenet_ds.c
+++ b/arch/powerpc/platforms/85xx/corenet_ds.c
@@ -16,7 +16,6 @@
#include <linux/kdev_t.h>
#include <linux/delay.h>
#include <linux/interrupt.h>
-#include <linux/memblock.h>
#include <asm/time.h>
#include <asm/machdep.h>
@@ -52,37 +51,16 @@ void __init corenet_ds_pic_init(void)
*/
void __init corenet_ds_setup_arch(void)
{
-#ifdef CONFIG_PCI
- struct device_node *np;
- struct pci_controller *hose;
-#endif
- dma_addr_t max = 0xffffffff;
-
mpc85xx_smp_init();
-#ifdef CONFIG_PCI
- for_each_node_by_type(np, "pci") {
- if (of_device_is_compatible(np, "fsl,p4080-pcie") ||
- of_device_is_compatible(np, "fsl,qoriq-pcie-v2.2")) {
- fsl_add_bridge(np, 0);
- hose = pci_find_hose_for_OF_device(np);
- max = min(max, hose->dma_window_base_cur +
- hose->dma_window_size);
- }
- }
-
-#ifdef CONFIG_PPC64
+#if defined(CONFIG_PCI) && defined(CONFIG_PPC64)
pci_devs_phb_init();
#endif
-#endif
-#ifdef CONFIG_SWIOTLB
- if ((memblock_end_of_DRAM() - 1) > max) {
- ppc_swiotlb_enable = 1;
- set_pci_dma_ops(&swiotlb_dma_ops);
- ppc_md.pci_dma_dev_setup = pci_dma_dev_setup_swiotlb;
- }
-#endif
+ fsl_pci_assign_primary();
+
+ swiotlb_detect_4g();
+
pr_info("%s board from Freescale Semiconductor\n", ppc_md.name);
}
@@ -99,6 +77,12 @@ static const struct of_device_id of_device_ids[] __devinitconst = {
{
.compatible = "fsl,qoriq-pcie-v2.2",
},
+ {
+ .compatible = "fsl,qoriq-pcie-v2.3",
+ },
+ {
+ .compatible = "fsl,qoriq-pcie-v2.4",
+ },
/* The following two are for the Freescale hypervisor */
{
.name = "hypervisor",
diff --git a/arch/powerpc/platforms/85xx/ge_imp3a.c b/arch/powerpc/platforms/85xx/ge_imp3a.c
index b6a728b0a8ca..e6285ae6f423 100644
--- a/arch/powerpc/platforms/85xx/ge_imp3a.c
+++ b/arch/powerpc/platforms/85xx/ge_imp3a.c
@@ -22,7 +22,6 @@
#include <linux/seq_file.h>
#include <linux/interrupt.h>
#include <linux/of_platform.h>
-#include <linux/memblock.h>
#include <asm/time.h>
#include <asm/machdep.h>
@@ -84,53 +83,39 @@ void __init ge_imp3a_pic_init(void)
of_node_put(cascade_node);
}
-#ifdef CONFIG_PCI
-static int primary_phb_addr;
-#endif /* CONFIG_PCI */
-
-/*
- * Setup the architecture
- */
-static void __init ge_imp3a_setup_arch(void)
+static void ge_imp3a_pci_assign_primary(void)
{
- struct device_node *regs;
#ifdef CONFIG_PCI
struct device_node *np;
- struct pci_controller *hose;
-#endif
- dma_addr_t max = 0xffffffff;
+ struct resource rsrc;
- if (ppc_md.progress)
- ppc_md.progress("ge_imp3a_setup_arch()", 0);
-
-#ifdef CONFIG_PCI
for_each_node_by_type(np, "pci") {
if (of_device_is_compatible(np, "fsl,mpc8540-pci") ||
of_device_is_compatible(np, "fsl,mpc8548-pcie") ||
of_device_is_compatible(np, "fsl,p2020-pcie")) {
- struct resource rsrc;
of_address_to_resource(np, 0, &rsrc);
- if ((rsrc.start & 0xfffff) == primary_phb_addr)
- fsl_add_bridge(np, 1);
- else
- fsl_add_bridge(np, 0);
-
- hose = pci_find_hose_for_OF_device(np);
- max = min(max, hose->dma_window_base_cur +
- hose->dma_window_size);
+ if ((rsrc.start & 0xfffff) == 0x9000)
+ fsl_pci_primary = np;
}
}
#endif
+}
+
+/*
+ * Setup the architecture
+ */
+static void __init ge_imp3a_setup_arch(void)
+{
+ struct device_node *regs;
+
+ if (ppc_md.progress)
+ ppc_md.progress("ge_imp3a_setup_arch()", 0);
mpc85xx_smp_init();
-#ifdef CONFIG_SWIOTLB
- if ((memblock_end_of_DRAM() - 1) > max) {
- ppc_swiotlb_enable = 1;
- set_pci_dma_ops(&swiotlb_dma_ops);
- ppc_md.pci_dma_dev_setup = pci_dma_dev_setup_swiotlb;
- }
-#endif
+ ge_imp3a_pci_assign_primary();
+
+ swiotlb_detect_4g();
/* Remap basic board registers */
regs = of_find_compatible_node(NULL, NULL, "ge,imp3a-fpga-regs");
@@ -215,17 +200,10 @@ static int __init ge_imp3a_probe(void)
{
unsigned long root = of_get_flat_dt_root();
- if (of_flat_dt_is_compatible(root, "ge,IMP3A")) {
-#ifdef CONFIG_PCI
- primary_phb_addr = 0x9000;
-#endif
- return 1;
- }
-
- return 0;
+ return of_flat_dt_is_compatible(root, "ge,IMP3A");
}
-machine_device_initcall(ge_imp3a, mpc85xx_common_publish_devices);
+machine_arch_initcall(ge_imp3a, mpc85xx_common_publish_devices);
machine_arch_initcall(ge_imp3a, swiotlb_setup_bus_notifier);
diff --git a/arch/powerpc/platforms/85xx/mpc8536_ds.c b/arch/powerpc/platforms/85xx/mpc8536_ds.c
index 767c7cf18a9c..15ce4b55f117 100644
--- a/arch/powerpc/platforms/85xx/mpc8536_ds.c
+++ b/arch/powerpc/platforms/85xx/mpc8536_ds.c
@@ -17,7 +17,6 @@
#include <linux/seq_file.h>
#include <linux/interrupt.h>
#include <linux/of_platform.h>
-#include <linux/memblock.h>
#include <asm/time.h>
#include <asm/machdep.h>
@@ -46,46 +45,17 @@ void __init mpc8536_ds_pic_init(void)
*/
static void __init mpc8536_ds_setup_arch(void)
{
-#ifdef CONFIG_PCI
- struct device_node *np;
- struct pci_controller *hose;
-#endif
- dma_addr_t max = 0xffffffff;
-
if (ppc_md.progress)
ppc_md.progress("mpc8536_ds_setup_arch()", 0);
-#ifdef CONFIG_PCI
- for_each_node_by_type(np, "pci") {
- if (of_device_is_compatible(np, "fsl,mpc8540-pci") ||
- of_device_is_compatible(np, "fsl,mpc8548-pcie")) {
- struct resource rsrc;
- of_address_to_resource(np, 0, &rsrc);
- if ((rsrc.start & 0xfffff) == 0x8000)
- fsl_add_bridge(np, 1);
- else
- fsl_add_bridge(np, 0);
-
- hose = pci_find_hose_for_OF_device(np);
- max = min(max, hose->dma_window_base_cur +
- hose->dma_window_size);
- }
- }
-
-#endif
+ fsl_pci_assign_primary();
-#ifdef CONFIG_SWIOTLB
- if ((memblock_end_of_DRAM() - 1) > max) {
- ppc_swiotlb_enable = 1;
- set_pci_dma_ops(&swiotlb_dma_ops);
- ppc_md.pci_dma_dev_setup = pci_dma_dev_setup_swiotlb;
- }
-#endif
+ swiotlb_detect_4g();
printk("MPC8536 DS board from Freescale Semiconductor\n");
}
-machine_device_initcall(mpc8536_ds, mpc85xx_common_publish_devices);
+machine_arch_initcall(mpc8536_ds, mpc85xx_common_publish_devices);
machine_arch_initcall(mpc8536_ds, swiotlb_setup_bus_notifier);
diff --git a/arch/powerpc/platforms/85xx/mpc85xx_ads.c b/arch/powerpc/platforms/85xx/mpc85xx_ads.c
index 29ee8fcd75a2..7d12a19aa7ee 100644
--- a/arch/powerpc/platforms/85xx/mpc85xx_ads.c
+++ b/arch/powerpc/platforms/85xx/mpc85xx_ads.c
@@ -137,10 +137,6 @@ static void __init init_ioports(void)
static void __init mpc85xx_ads_setup_arch(void)
{
-#ifdef CONFIG_PCI
- struct device_node *np;
-#endif
-
if (ppc_md.progress)
ppc_md.progress("mpc85xx_ads_setup_arch()", 0);
@@ -150,11 +146,10 @@ static void __init mpc85xx_ads_setup_arch(void)
#endif
#ifdef CONFIG_PCI
- for_each_compatible_node(np, "pci", "fsl,mpc8540-pci")
- fsl_add_bridge(np, 1);
-
ppc_md.pci_exclude_device = mpc85xx_exclude_device;
#endif
+
+ fsl_pci_assign_primary();
}
static void mpc85xx_ads_show_cpuinfo(struct seq_file *m)
@@ -173,7 +168,7 @@ static void mpc85xx_ads_show_cpuinfo(struct seq_file *m)
seq_printf(m, "PLL setting\t: 0x%x\n", ((phid1 >> 24) & 0x3f));
}
-machine_device_initcall(mpc85xx_ads, mpc85xx_common_publish_devices);
+machine_arch_initcall(mpc85xx_ads, mpc85xx_common_publish_devices);
/*
* Called very early, device-tree isn't unflattened
diff --git a/arch/powerpc/platforms/85xx/mpc85xx_cds.c b/arch/powerpc/platforms/85xx/mpc85xx_cds.c
index 11156fb53d83..c474505ad0d0 100644
--- a/arch/powerpc/platforms/85xx/mpc85xx_cds.c
+++ b/arch/powerpc/platforms/85xx/mpc85xx_cds.c
@@ -276,6 +276,33 @@ machine_device_initcall(mpc85xx_cds, mpc85xx_cds_8259_attach);
#endif /* CONFIG_PPC_I8259 */
+static void mpc85xx_cds_pci_assign_primary(void)
+{
+#ifdef CONFIG_PCI
+ struct device_node *np;
+
+ if (fsl_pci_primary)
+ return;
+
+ /*
+ * MPC85xx_CDS has ISA bridge but unfortunately there is no
+ * isa node in device tree. We now looking for i8259 node as
+ * a workaround for such a broken device tree. This routine
+ * is for complying to all device trees.
+ */
+ np = of_find_node_by_name(NULL, "i8259");
+ while ((fsl_pci_primary = of_get_parent(np))) {
+ of_node_put(np);
+ np = fsl_pci_primary;
+
+ if ((of_device_is_compatible(np, "fsl,mpc8540-pci") ||
+ of_device_is_compatible(np, "fsl,mpc8548-pcie")) &&
+ of_device_is_available(np))
+ return;
+ }
+#endif
+}
+
/*
* Setup the architecture
*/
@@ -309,21 +336,12 @@ static void __init mpc85xx_cds_setup_arch(void)
}
#ifdef CONFIG_PCI
- for_each_node_by_type(np, "pci") {
- if (of_device_is_compatible(np, "fsl,mpc8540-pci") ||
- of_device_is_compatible(np, "fsl,mpc8548-pcie")) {
- struct resource rsrc;
- of_address_to_resource(np, 0, &rsrc);
- if ((rsrc.start & 0xfffff) == 0x8000)
- fsl_add_bridge(np, 1);
- else
- fsl_add_bridge(np, 0);
- }
- }
-
ppc_md.pci_irq_fixup = mpc85xx_cds_pci_irq_fixup;
ppc_md.pci_exclude_device = mpc85xx_exclude_device;
#endif
+
+ mpc85xx_cds_pci_assign_primary();
+ fsl_pci_assign_primary();
}
static void mpc85xx_cds_show_cpuinfo(struct seq_file *m)
@@ -355,7 +373,7 @@ static int __init mpc85xx_cds_probe(void)
return of_flat_dt_is_compatible(root, "MPC85xxCDS");
}
-machine_device_initcall(mpc85xx_cds, mpc85xx_common_publish_devices);
+machine_arch_initcall(mpc85xx_cds, mpc85xx_common_publish_devices);
define_machine(mpc85xx_cds) {
.name = "MPC85xx CDS",
diff --git a/arch/powerpc/platforms/85xx/mpc85xx_ds.c b/arch/powerpc/platforms/85xx/mpc85xx_ds.c
index 6d3265fe7718..9ebb91ed96a3 100644
--- a/arch/powerpc/platforms/85xx/mpc85xx_ds.c
+++ b/arch/powerpc/platforms/85xx/mpc85xx_ds.c
@@ -20,7 +20,6 @@
#include <linux/seq_file.h>
#include <linux/interrupt.h>
#include <linux/of_platform.h>
-#include <linux/memblock.h>
#include <asm/time.h>
#include <asm/machdep.h>
@@ -129,13 +128,11 @@ static int mpc85xx_exclude_device(struct pci_controller *hose,
}
#endif /* CONFIG_PCI */
-static void __init mpc85xx_ds_pci_init(void)
+static void __init mpc85xx_ds_uli_init(void)
{
#ifdef CONFIG_PCI
struct device_node *node;
- fsl_pci_init();
-
/* See if we have a ULI under the primary */
node = of_find_node_by_name(NULL, "uli1575");
@@ -159,7 +156,9 @@ static void __init mpc85xx_ds_setup_arch(void)
if (ppc_md.progress)
ppc_md.progress("mpc85xx_ds_setup_arch()", 0);
- mpc85xx_ds_pci_init();
+ swiotlb_detect_4g();
+ fsl_pci_assign_primary();
+ mpc85xx_ds_uli_init();
mpc85xx_smp_init();
printk("MPC85xx DS board from Freescale Semiconductor\n");
@@ -175,9 +174,9 @@ static int __init mpc8544_ds_probe(void)
return !!of_flat_dt_is_compatible(root, "MPC8544DS");
}
-machine_device_initcall(mpc8544_ds, mpc85xx_common_publish_devices);
-machine_device_initcall(mpc8572_ds, mpc85xx_common_publish_devices);
-machine_device_initcall(p2020_ds, mpc85xx_common_publish_devices);
+machine_arch_initcall(mpc8544_ds, mpc85xx_common_publish_devices);
+machine_arch_initcall(mpc8572_ds, mpc85xx_common_publish_devices);
+machine_arch_initcall(p2020_ds, mpc85xx_common_publish_devices);
machine_arch_initcall(mpc8544_ds, swiotlb_setup_bus_notifier);
machine_arch_initcall(mpc8572_ds, swiotlb_setup_bus_notifier);
diff --git a/arch/powerpc/platforms/85xx/mpc85xx_mds.c b/arch/powerpc/platforms/85xx/mpc85xx_mds.c
index 8e4b094c553b..8498f7323470 100644
--- a/arch/powerpc/platforms/85xx/mpc85xx_mds.c
+++ b/arch/powerpc/platforms/85xx/mpc85xx_mds.c
@@ -327,44 +327,16 @@ static void __init mpc85xx_mds_qeic_init(void) { }
static void __init mpc85xx_mds_setup_arch(void)
{
-#ifdef CONFIG_PCI
- struct pci_controller *hose;
- struct device_node *np;
-#endif
- dma_addr_t max = 0xffffffff;
-
if (ppc_md.progress)
ppc_md.progress("mpc85xx_mds_setup_arch()", 0);
-#ifdef CONFIG_PCI
- for_each_node_by_type(np, "pci") {
- if (of_device_is_compatible(np, "fsl,mpc8540-pci") ||
- of_device_is_compatible(np, "fsl,mpc8548-pcie")) {
- struct resource rsrc;
- of_address_to_resource(np, 0, &rsrc);
- if ((rsrc.start & 0xfffff) == 0x8000)
- fsl_add_bridge(np, 1);
- else
- fsl_add_bridge(np, 0);
-
- hose = pci_find_hose_for_OF_device(np);
- max = min(max, hose->dma_window_base_cur +
- hose->dma_window_size);
- }
- }
-#endif
-
mpc85xx_smp_init();
mpc85xx_mds_qe_init();
-#ifdef CONFIG_SWIOTLB
- if ((memblock_end_of_DRAM() - 1) > max) {
- ppc_swiotlb_enable = 1;
- set_pci_dma_ops(&swiotlb_dma_ops);
- ppc_md.pci_dma_dev_setup = pci_dma_dev_setup_swiotlb;
- }
-#endif
+ fsl_pci_assign_primary();
+
+ swiotlb_detect_4g();
}
@@ -409,9 +381,9 @@ static int __init mpc85xx_publish_devices(void)
return mpc85xx_common_publish_devices();
}
-machine_device_initcall(mpc8568_mds, mpc85xx_publish_devices);
-machine_device_initcall(mpc8569_mds, mpc85xx_publish_devices);
-machine_device_initcall(p1021_mds, mpc85xx_common_publish_devices);
+machine_arch_initcall(mpc8568_mds, mpc85xx_publish_devices);
+machine_arch_initcall(mpc8569_mds, mpc85xx_publish_devices);
+machine_arch_initcall(p1021_mds, mpc85xx_common_publish_devices);
machine_arch_initcall(mpc8568_mds, swiotlb_setup_bus_notifier);
machine_arch_initcall(mpc8569_mds, swiotlb_setup_bus_notifier);
diff --git a/arch/powerpc/platforms/85xx/mpc85xx_rdb.c b/arch/powerpc/platforms/85xx/mpc85xx_rdb.c
index 1910fdcb75b2..ede8771d6f02 100644
--- a/arch/powerpc/platforms/85xx/mpc85xx_rdb.c
+++ b/arch/powerpc/platforms/85xx/mpc85xx_rdb.c
@@ -86,23 +86,17 @@ void __init mpc85xx_rdb_pic_init(void)
*/
static void __init mpc85xx_rdb_setup_arch(void)
{
-#if defined(CONFIG_PCI) || defined(CONFIG_QUICC_ENGINE)
+#ifdef CONFIG_QUICC_ENGINE
struct device_node *np;
#endif
if (ppc_md.progress)
ppc_md.progress("mpc85xx_rdb_setup_arch()", 0);
-#ifdef CONFIG_PCI
- for_each_node_by_type(np, "pci") {
- if (of_device_is_compatible(np, "fsl,mpc8548-pcie"))
- fsl_add_bridge(np, 0);
- }
-
-#endif
-
mpc85xx_smp_init();
+ fsl_pci_assign_primary();
+
#ifdef CONFIG_QUICC_ENGINE
np = of_find_compatible_node(NULL, NULL, "fsl,qe");
if (!np) {
@@ -161,15 +155,15 @@ qe_fail:
printk(KERN_INFO "MPC85xx RDB board from Freescale Semiconductor\n");
}
-machine_device_initcall(p2020_rdb, mpc85xx_common_publish_devices);
-machine_device_initcall(p2020_rdb_pc, mpc85xx_common_publish_devices);
-machine_device_initcall(p1020_mbg_pc, mpc85xx_common_publish_devices);
-machine_device_initcall(p1020_rdb, mpc85xx_common_publish_devices);
-machine_device_initcall(p1020_rdb_pc, mpc85xx_common_publish_devices);
-machine_device_initcall(p1020_utm_pc, mpc85xx_common_publish_devices);
-machine_device_initcall(p1021_rdb_pc, mpc85xx_common_publish_devices);
-machine_device_initcall(p1025_rdb, mpc85xx_common_publish_devices);
-machine_device_initcall(p1024_rdb, mpc85xx_common_publish_devices);
+machine_arch_initcall(p2020_rdb, mpc85xx_common_publish_devices);
+machine_arch_initcall(p2020_rdb_pc, mpc85xx_common_publish_devices);
+machine_arch_initcall(p1020_mbg_pc, mpc85xx_common_publish_devices);
+machine_arch_initcall(p1020_rdb, mpc85xx_common_publish_devices);
+machine_arch_initcall(p1020_rdb_pc, mpc85xx_common_publish_devices);
+machine_arch_initcall(p1020_utm_pc, mpc85xx_common_publish_devices);
+machine_arch_initcall(p1021_rdb_pc, mpc85xx_common_publish_devices);
+machine_arch_initcall(p1025_rdb, mpc85xx_common_publish_devices);
+machine_arch_initcall(p1024_rdb, mpc85xx_common_publish_devices);
/*
* Called very early, device-tree isn't unflattened
diff --git a/arch/powerpc/platforms/85xx/p1010rdb.c b/arch/powerpc/platforms/85xx/p1010rdb.c
index dbaf44354f0d..0252961392d5 100644
--- a/arch/powerpc/platforms/85xx/p1010rdb.c
+++ b/arch/powerpc/platforms/85xx/p1010rdb.c
@@ -46,25 +46,15 @@ void __init p1010_rdb_pic_init(void)
*/
static void __init p1010_rdb_setup_arch(void)
{
-#ifdef CONFIG_PCI
- struct device_node *np;
-#endif
-
if (ppc_md.progress)
ppc_md.progress("p1010_rdb_setup_arch()", 0);
-#ifdef CONFIG_PCI
- for_each_node_by_type(np, "pci") {
- if (of_device_is_compatible(np, "fsl,p1010-pcie"))
- fsl_add_bridge(np, 0);
- }
-
-#endif
+ fsl_pci_assign_primary();
printk(KERN_INFO "P1010 RDB board from Freescale Semiconductor\n");
}
-machine_device_initcall(p1010_rdb, mpc85xx_common_publish_devices);
+machine_arch_initcall(p1010_rdb, mpc85xx_common_publish_devices);
machine_arch_initcall(p1010_rdb, swiotlb_setup_bus_notifier);
/*
diff --git a/arch/powerpc/platforms/85xx/p1022_ds.c b/arch/powerpc/platforms/85xx/p1022_ds.c
index 3c732acf331d..848a3e98e1c1 100644
--- a/arch/powerpc/platforms/85xx/p1022_ds.c
+++ b/arch/powerpc/platforms/85xx/p1022_ds.c
@@ -18,7 +18,6 @@
#include <linux/pci.h>
#include <linux/of_platform.h>
-#include <linux/memblock.h>
#include <asm/div64.h>
#include <asm/mpic.h>
#include <asm/swiotlb.h>
@@ -507,32 +506,9 @@ early_param("video", early_video_setup);
*/
static void __init p1022_ds_setup_arch(void)
{
-#ifdef CONFIG_PCI
- struct device_node *np;
-#endif
- dma_addr_t max = 0xffffffff;
-
if (ppc_md.progress)
ppc_md.progress("p1022_ds_setup_arch()", 0);
-#ifdef CONFIG_PCI
- for_each_compatible_node(np, "pci", "fsl,p1022-pcie") {
- struct resource rsrc;
- struct pci_controller *hose;
-
- of_address_to_resource(np, 0, &rsrc);
-
- if ((rsrc.start & 0xfffff) == 0x8000)
- fsl_add_bridge(np, 1);
- else
- fsl_add_bridge(np, 0);
-
- hose = pci_find_hose_for_OF_device(np);
- max = min(max, hose->dma_window_base_cur +
- hose->dma_window_size);
- }
-#endif
-
#if defined(CONFIG_FB_FSL_DIU) || defined(CONFIG_FB_FSL_DIU_MODULE)
diu_ops.get_pixel_format = p1022ds_get_pixel_format;
diu_ops.set_gamma_table = p1022ds_set_gamma_table;
@@ -601,18 +577,14 @@ static void __init p1022_ds_setup_arch(void)
mpc85xx_smp_init();
-#ifdef CONFIG_SWIOTLB
- if ((memblock_end_of_DRAM() - 1) > max) {
- ppc_swiotlb_enable = 1;
- set_pci_dma_ops(&swiotlb_dma_ops);
- ppc_md.pci_dma_dev_setup = pci_dma_dev_setup_swiotlb;
- }
-#endif
+ fsl_pci_assign_primary();
+
+ swiotlb_detect_4g();
pr_info("Freescale P1022 DS reference board\n");
}
-machine_device_initcall(p1022_ds, mpc85xx_common_publish_devices);
+machine_arch_initcall(p1022_ds, mpc85xx_common_publish_devices);
machine_arch_initcall(p1022_ds, swiotlb_setup_bus_notifier);
diff --git a/arch/powerpc/platforms/85xx/p1022_rdk.c b/arch/powerpc/platforms/85xx/p1022_rdk.c
new file mode 100644
index 000000000000..55ffa1cc380c
--- /dev/null
+++ b/arch/powerpc/platforms/85xx/p1022_rdk.c
@@ -0,0 +1,167 @@
+/*
+ * P1022 RDK board specific routines
+ *
+ * Copyright 2012 Freescale Semiconductor, Inc.
+ *
+ * Author: Timur Tabi <timur@freescale.com>
+ *
+ * Based on p1022_ds.c
+ *
+ * This file is licensed under the terms of the GNU General Public License
+ * version 2. This program is licensed "as is" without any warranty of any
+ * kind, whether express or implied.
+ */
+
+#include <linux/pci.h>
+#include <linux/of_platform.h>
+#include <asm/div64.h>
+#include <asm/mpic.h>
+#include <asm/swiotlb.h>
+
+#include <sysdev/fsl_soc.h>
+#include <sysdev/fsl_pci.h>
+#include <asm/udbg.h>
+#include <asm/fsl_guts.h>
+#include "smp.h"
+
+#include "mpc85xx.h"
+
+#if defined(CONFIG_FB_FSL_DIU) || defined(CONFIG_FB_FSL_DIU_MODULE)
+
+/* DIU Pixel Clock bits of the CLKDVDR Global Utilities register */
+#define CLKDVDR_PXCKEN 0x80000000
+#define CLKDVDR_PXCKINV 0x10000000
+#define CLKDVDR_PXCKDLY 0x06000000
+#define CLKDVDR_PXCLK_MASK 0x00FF0000
+
+/**
+ * p1022rdk_set_monitor_port: switch the output to a different monitor port
+ */
+static void p1022rdk_set_monitor_port(enum fsl_diu_monitor_port port)
+{
+ if (port != FSL_DIU_PORT_DVI) {
+ pr_err("p1022rdk: unsupported monitor port %i\n", port);
+ return;
+ }
+}
+
+/**
+ * p1022rdk_set_pixel_clock: program the DIU's clock
+ *
+ * @pixclock: the wavelength, in picoseconds, of the clock
+ */
+void p1022rdk_set_pixel_clock(unsigned int pixclock)
+{
+ struct device_node *guts_np = NULL;
+ struct ccsr_guts __iomem *guts;
+ unsigned long freq;
+ u64 temp;
+ u32 pxclk;
+
+ /* Map the global utilities registers. */
+ guts_np = of_find_compatible_node(NULL, NULL, "fsl,p1022-guts");
+ if (!guts_np) {
+ pr_err("p1022rdk: missing global utilties device node\n");
+ return;
+ }
+
+ guts = of_iomap(guts_np, 0);
+ of_node_put(guts_np);
+ if (!guts) {
+ pr_err("p1022rdk: could not map global utilties device\n");
+ return;
+ }
+
+ /* Convert pixclock from a wavelength to a frequency */
+ temp = 1000000000000ULL;
+ do_div(temp, pixclock);
+ freq = temp;
+
+ /*
+ * 'pxclk' is the ratio of the platform clock to the pixel clock.
+ * This number is programmed into the CLKDVDR register, and the valid
+ * range of values is 2-255.
+ */
+ pxclk = DIV_ROUND_CLOSEST(fsl_get_sys_freq(), freq);
+ pxclk = clamp_t(u32, pxclk, 2, 255);
+
+ /* Disable the pixel clock, and set it to non-inverted and no delay */
+ clrbits32(&guts->clkdvdr,
+ CLKDVDR_PXCKEN | CLKDVDR_PXCKDLY | CLKDVDR_PXCLK_MASK);
+
+ /* Enable the clock and set the pxclk */
+ setbits32(&guts->clkdvdr, CLKDVDR_PXCKEN | (pxclk << 16));
+
+ iounmap(guts);
+}
+
+/**
+ * p1022rdk_valid_monitor_port: set the monitor port for sysfs
+ */
+enum fsl_diu_monitor_port
+p1022rdk_valid_monitor_port(enum fsl_diu_monitor_port port)
+{
+ return FSL_DIU_PORT_DVI;
+}
+
+#endif
+
+void __init p1022_rdk_pic_init(void)
+{
+ struct mpic *mpic = mpic_alloc(NULL, 0, MPIC_BIG_ENDIAN |
+ MPIC_SINGLE_DEST_CPU,
+ 0, 256, " OpenPIC ");
+ BUG_ON(mpic == NULL);
+ mpic_init(mpic);
+}
+
+/*
+ * Setup the architecture
+ */
+static void __init p1022_rdk_setup_arch(void)
+{
+ if (ppc_md.progress)
+ ppc_md.progress("p1022_rdk_setup_arch()", 0);
+
+#if defined(CONFIG_FB_FSL_DIU) || defined(CONFIG_FB_FSL_DIU_MODULE)
+ diu_ops.set_monitor_port = p1022rdk_set_monitor_port;
+ diu_ops.set_pixel_clock = p1022rdk_set_pixel_clock;
+ diu_ops.valid_monitor_port = p1022rdk_valid_monitor_port;
+#endif
+
+ mpc85xx_smp_init();
+
+ fsl_pci_assign_primary();
+
+ swiotlb_detect_4g();
+
+ pr_info("Freescale / iVeia P1022 RDK reference board\n");
+}
+
+machine_arch_initcall(p1022_rdk, mpc85xx_common_publish_devices);
+
+machine_arch_initcall(p1022_rdk, swiotlb_setup_bus_notifier);
+
+/*
+ * Called very early, device-tree isn't unflattened
+ */
+static int __init p1022_rdk_probe(void)
+{
+ unsigned long root = of_get_flat_dt_root();
+
+ return of_flat_dt_is_compatible(root, "fsl,p1022rdk");
+}
+
+define_machine(p1022_rdk) {
+ .name = "P1022 RDK",
+ .probe = p1022_rdk_probe,
+ .setup_arch = p1022_rdk_setup_arch,
+ .init_IRQ = p1022_rdk_pic_init,
+#ifdef CONFIG_PCI
+ .pcibios_fixup_bus = fsl_pcibios_fixup_bus,
+#endif
+ .get_irq = mpic_get_irq,
+ .restart = fsl_rstcr_restart,
+ .calibrate_decr = generic_calibrate_decr,
+ .progress = udbg_progress,
+};
diff --git a/arch/powerpc/platforms/85xx/p1023_rds.c b/arch/powerpc/platforms/85xx/p1023_rds.c
index 2990e8b13dc9..9cc60a738834 100644
--- a/arch/powerpc/platforms/85xx/p1023_rds.c
+++ b/arch/powerpc/platforms/85xx/p1023_rds.c
@@ -80,15 +80,12 @@ static void __init mpc85xx_rds_setup_arch(void)
}
}
-#ifdef CONFIG_PCI
- for_each_compatible_node(np, "pci", "fsl,p1023-pcie")
- fsl_add_bridge(np, 0);
-#endif
-
mpc85xx_smp_init();
+
+ fsl_pci_assign_primary();
}
-machine_device_initcall(p1023_rds, mpc85xx_common_publish_devices);
+machine_arch_initcall(p1023_rds, mpc85xx_common_publish_devices);
static void __init mpc85xx_rds_pic_init(void)
{
diff --git a/arch/powerpc/platforms/85xx/p2041_rdb.c b/arch/powerpc/platforms/85xx/p2041_rdb.c
index 6541fa2630c0..000c0892fc40 100644
--- a/arch/powerpc/platforms/85xx/p2041_rdb.c
+++ b/arch/powerpc/platforms/85xx/p2041_rdb.c
@@ -80,7 +80,7 @@ define_machine(p2041_rdb) {
.power_save = e500_idle,
};
-machine_device_initcall(p2041_rdb, corenet_ds_publish_devices);
+machine_arch_initcall(p2041_rdb, corenet_ds_publish_devices);
#ifdef CONFIG_SWIOTLB
machine_arch_initcall(p2041_rdb, swiotlb_setup_bus_notifier);
diff --git a/arch/powerpc/platforms/85xx/p3041_ds.c b/arch/powerpc/platforms/85xx/p3041_ds.c
index f238efa75891..b3edc205daa9 100644
--- a/arch/powerpc/platforms/85xx/p3041_ds.c
+++ b/arch/powerpc/platforms/85xx/p3041_ds.c
@@ -82,7 +82,7 @@ define_machine(p3041_ds) {
.power_save = e500_idle,
};
-machine_device_initcall(p3041_ds, corenet_ds_publish_devices);
+machine_arch_initcall(p3041_ds, corenet_ds_publish_devices);
#ifdef CONFIG_SWIOTLB
machine_arch_initcall(p3041_ds, swiotlb_setup_bus_notifier);
diff --git a/arch/powerpc/platforms/85xx/p4080_ds.c b/arch/powerpc/platforms/85xx/p4080_ds.c
index c92417dc6574..54df10632aea 100644
--- a/arch/powerpc/platforms/85xx/p4080_ds.c
+++ b/arch/powerpc/platforms/85xx/p4080_ds.c
@@ -81,7 +81,7 @@ define_machine(p4080_ds) {
.power_save = e500_idle,
};
-machine_device_initcall(p4080_ds, corenet_ds_publish_devices);
+machine_arch_initcall(p4080_ds, corenet_ds_publish_devices);
#ifdef CONFIG_SWIOTLB
machine_arch_initcall(p4080_ds, swiotlb_setup_bus_notifier);
#endif
diff --git a/arch/powerpc/platforms/85xx/p5020_ds.c b/arch/powerpc/platforms/85xx/p5020_ds.c
index 17bef15a85ed..753a42c29d4d 100644
--- a/arch/powerpc/platforms/85xx/p5020_ds.c
+++ b/arch/powerpc/platforms/85xx/p5020_ds.c
@@ -91,7 +91,7 @@ define_machine(p5020_ds) {
#endif
};
-machine_device_initcall(p5020_ds, corenet_ds_publish_devices);
+machine_arch_initcall(p5020_ds, corenet_ds_publish_devices);
#ifdef CONFIG_SWIOTLB
machine_arch_initcall(p5020_ds, swiotlb_setup_bus_notifier);
diff --git a/arch/powerpc/platforms/85xx/p5040_ds.c b/arch/powerpc/platforms/85xx/p5040_ds.c
new file mode 100644
index 000000000000..11381851828e
--- /dev/null
+++ b/arch/powerpc/platforms/85xx/p5040_ds.c
@@ -0,0 +1,89 @@
+/*
+ * P5040 DS Setup
+ *
+ * Copyright 2009-2010 Freescale Semiconductor Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2 of the License, or (at your
+ * option) any later version.
+ */
+
+#include <linux/kernel.h>
+#include <linux/pci.h>
+
+#include <asm/machdep.h>
+#include <asm/udbg.h>
+#include <asm/mpic.h>
+
+#include <linux/of_fdt.h>
+
+#include <sysdev/fsl_soc.h>
+#include <sysdev/fsl_pci.h>
+#include <asm/ehv_pic.h>
+
+#include "corenet_ds.h"
+
+/*
+ * Called very early, device-tree isn't unflattened
+ */
+static int __init p5040_ds_probe(void)
+{
+ unsigned long root = of_get_flat_dt_root();
+#ifdef CONFIG_SMP
+ extern struct smp_ops_t smp_85xx_ops;
+#endif
+
+ if (of_flat_dt_is_compatible(root, "fsl,P5040DS"))
+ return 1;
+
+ /* Check if we're running under the Freescale hypervisor */
+ if (of_flat_dt_is_compatible(root, "fsl,P5040DS-hv")) {
+ ppc_md.init_IRQ = ehv_pic_init;
+ ppc_md.get_irq = ehv_pic_get_irq;
+ ppc_md.restart = fsl_hv_restart;
+ ppc_md.power_off = fsl_hv_halt;
+ ppc_md.halt = fsl_hv_halt;
+#ifdef CONFIG_SMP
+ /*
+ * Disable the timebase sync operations because we can't write
+ * to the timebase registers under the hypervisor.
+ */
+ smp_85xx_ops.give_timebase = NULL;
+ smp_85xx_ops.take_timebase = NULL;
+#endif
+ return 1;
+ }
+
+ return 0;
+}
+
+define_machine(p5040_ds) {
+ .name = "P5040 DS",
+ .probe = p5040_ds_probe,
+ .setup_arch = corenet_ds_setup_arch,
+ .init_IRQ = corenet_ds_pic_init,
+#ifdef CONFIG_PCI
+ .pcibios_fixup_bus = fsl_pcibios_fixup_bus,
+#endif
+/* coreint doesn't play nice with lazy EE, use legacy mpic for now */
+#ifdef CONFIG_PPC64
+ .get_irq = mpic_get_irq,
+#else
+ .get_irq = mpic_get_coreint_irq,
+#endif
+ .restart = fsl_rstcr_restart,
+ .calibrate_decr = generic_calibrate_decr,
+ .progress = udbg_progress,
+#ifdef CONFIG_PPC64
+ .power_save = book3e_idle,
+#else
+ .power_save = e500_idle,
+#endif
+};
+
+machine_arch_initcall(p5040_ds, corenet_ds_publish_devices);
+
+#ifdef CONFIG_SWIOTLB
+machine_arch_initcall(p5040_ds, swiotlb_setup_bus_notifier);
+#endif
diff --git a/arch/powerpc/platforms/85xx/qemu_e500.c b/arch/powerpc/platforms/85xx/qemu_e500.c
index 95a2e53af71b..f6ea5618c733 100644
--- a/arch/powerpc/platforms/85xx/qemu_e500.c
+++ b/arch/powerpc/platforms/85xx/qemu_e500.c
@@ -41,7 +41,8 @@ static void __init qemu_e500_setup_arch(void)
{
ppc_md.progress("qemu_e500_setup_arch()", 0);
- fsl_pci_init();
+ fsl_pci_assign_primary();
+ swiotlb_detect_4g();
mpc85xx_smp_init();
}
@@ -55,7 +56,7 @@ static int __init qemu_e500_probe(void)
return !!of_flat_dt_is_compatible(root, "fsl,qemu-e500");
}
-machine_device_initcall(qemu_e500, mpc85xx_common_publish_devices);
+machine_arch_initcall(qemu_e500, mpc85xx_common_publish_devices);
define_machine(qemu_e500) {
.name = "QEMU e500",
diff --git a/arch/powerpc/platforms/85xx/sbc8548.c b/arch/powerpc/platforms/85xx/sbc8548.c
index cd3a66bdb54b..f62121825914 100644
--- a/arch/powerpc/platforms/85xx/sbc8548.c
+++ b/arch/powerpc/platforms/85xx/sbc8548.c
@@ -88,26 +88,11 @@ static int __init sbc8548_hw_rev(void)
*/
static void __init sbc8548_setup_arch(void)
{
-#ifdef CONFIG_PCI
- struct device_node *np;
-#endif
-
if (ppc_md.progress)
ppc_md.progress("sbc8548_setup_arch()", 0);
-#ifdef CONFIG_PCI
- for_each_node_by_type(np, "pci") {
- if (of_device_is_compatible(np, "fsl,mpc8540-pci") ||
- of_device_is_compatible(np, "fsl,mpc8548-pcie")) {
- struct resource rsrc;
- of_address_to_resource(np, 0, &rsrc);
- if ((rsrc.start & 0xfffff) == 0x8000)
- fsl_add_bridge(np, 1);
- else
- fsl_add_bridge(np, 0);
- }
- }
-#endif
+ fsl_pci_assign_primary();
+
sbc_rev = sbc8548_hw_rev();
}
@@ -128,7 +113,7 @@ static void sbc8548_show_cpuinfo(struct seq_file *m)
seq_printf(m, "PLL setting\t: 0x%x\n", ((phid1 >> 24) & 0x3f));
}
-machine_device_initcall(sbc8548, mpc85xx_common_publish_devices);
+machine_arch_initcall(sbc8548, mpc85xx_common_publish_devices);
/*
* Called very early, device-tree isn't unflattened
diff --git a/arch/powerpc/platforms/85xx/smp.c b/arch/powerpc/platforms/85xx/smp.c
index ff4249044a3c..6fcfa12e5c56 100644
--- a/arch/powerpc/platforms/85xx/smp.c
+++ b/arch/powerpc/platforms/85xx/smp.c
@@ -2,7 +2,7 @@
* Author: Andy Fleming <afleming@freescale.com>
* Kumar Gala <galak@kernel.crashing.org>
*
- * Copyright 2006-2008, 2011 Freescale Semiconductor Inc.
+ * Copyright 2006-2008, 2011-2012 Freescale Semiconductor Inc.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License as published by the
@@ -17,6 +17,7 @@
#include <linux/of.h>
#include <linux/kexec.h>
#include <linux/highmem.h>
+#include <linux/cpu.h>
#include <asm/machdep.h>
#include <asm/pgtable.h>
@@ -24,33 +25,118 @@
#include <asm/mpic.h>
#include <asm/cacheflush.h>
#include <asm/dbell.h>
+#include <asm/fsl_guts.h>
#include <sysdev/fsl_soc.h>
#include <sysdev/mpic.h>
#include "smp.h"
-extern void __early_start(void);
-
-#define BOOT_ENTRY_ADDR_UPPER 0
-#define BOOT_ENTRY_ADDR_LOWER 1
-#define BOOT_ENTRY_R3_UPPER 2
-#define BOOT_ENTRY_R3_LOWER 3
-#define BOOT_ENTRY_RESV 4
-#define BOOT_ENTRY_PIR 5
-#define BOOT_ENTRY_R6_UPPER 6
-#define BOOT_ENTRY_R6_LOWER 7
-#define NUM_BOOT_ENTRY 8
-#define SIZE_BOOT_ENTRY (NUM_BOOT_ENTRY * sizeof(u32))
-
-static int __init
-smp_85xx_kick_cpu(int nr)
+struct epapr_spin_table {
+ u32 addr_h;
+ u32 addr_l;
+ u32 r3_h;
+ u32 r3_l;
+ u32 reserved;
+ u32 pir;
+};
+
+static struct ccsr_guts __iomem *guts;
+static u64 timebase;
+static int tb_req;
+static int tb_valid;
+
+static void mpc85xx_timebase_freeze(int freeze)
+{
+ uint32_t mask;
+
+ mask = CCSR_GUTS_DEVDISR_TB0 | CCSR_GUTS_DEVDISR_TB1;
+ if (freeze)
+ setbits32(&guts->devdisr, mask);
+ else
+ clrbits32(&guts->devdisr, mask);
+
+ in_be32(&guts->devdisr);
+}
+
+static void mpc85xx_give_timebase(void)
+{
+ unsigned long flags;
+
+ local_irq_save(flags);
+
+ while (!tb_req)
+ barrier();
+ tb_req = 0;
+
+ mpc85xx_timebase_freeze(1);
+ timebase = get_tb();
+ mb();
+ tb_valid = 1;
+
+ while (tb_valid)
+ barrier();
+
+ mpc85xx_timebase_freeze(0);
+
+ local_irq_restore(flags);
+}
+
+static void mpc85xx_take_timebase(void)
+{
+ unsigned long flags;
+
+ local_irq_save(flags);
+
+ tb_req = 1;
+ while (!tb_valid)
+ barrier();
+
+ set_tb(timebase >> 32, timebase & 0xffffffff);
+ isync();
+ tb_valid = 0;
+
+ local_irq_restore(flags);
+}
+
+#ifdef CONFIG_HOTPLUG_CPU
+static void __cpuinit smp_85xx_mach_cpu_die(void)
+{
+ unsigned int cpu = smp_processor_id();
+ u32 tmp;
+
+ local_irq_disable();
+ idle_task_exit();
+ generic_set_cpu_dead(cpu);
+ mb();
+
+ mtspr(SPRN_TCR, 0);
+
+ __flush_disable_L1();
+ tmp = (mfspr(SPRN_HID0) & ~(HID0_DOZE|HID0_SLEEP)) | HID0_NAP;
+ mtspr(SPRN_HID0, tmp);
+ isync();
+
+ /* Enter NAP mode. */
+ tmp = mfmsr();
+ tmp |= MSR_WE;
+ mb();
+ mtmsr(tmp);
+ isync();
+
+ while (1)
+ ;
+}
+#endif
+
+static int __cpuinit smp_85xx_kick_cpu(int nr)
{
unsigned long flags;
const u64 *cpu_rel_addr;
- __iomem u32 *bptr_vaddr;
+ __iomem struct epapr_spin_table *spin_table;
struct device_node *np;
- int n = 0, hw_cpu = get_hard_smp_processor_id(nr);
+ int hw_cpu = get_hard_smp_processor_id(nr);
int ioremappable;
+ int ret = 0;
WARN_ON(nr < 0 || nr >= NR_CPUS);
WARN_ON(hw_cpu < 0 || hw_cpu >= NR_CPUS);
@@ -75,46 +161,81 @@ smp_85xx_kick_cpu(int nr)
/* Map the spin table */
if (ioremappable)
- bptr_vaddr = ioremap(*cpu_rel_addr, SIZE_BOOT_ENTRY);
+ spin_table = ioremap(*cpu_rel_addr,
+ sizeof(struct epapr_spin_table));
else
- bptr_vaddr = phys_to_virt(*cpu_rel_addr);
+ spin_table = phys_to_virt(*cpu_rel_addr);
local_irq_save(flags);
-
- out_be32(bptr_vaddr + BOOT_ENTRY_PIR, hw_cpu);
#ifdef CONFIG_PPC32
- out_be32(bptr_vaddr + BOOT_ENTRY_ADDR_LOWER, __pa(__early_start));
+#ifdef CONFIG_HOTPLUG_CPU
+ /* Corresponding to generic_set_cpu_dead() */
+ generic_set_cpu_up(nr);
+
+ if (system_state == SYSTEM_RUNNING) {
+ out_be32(&spin_table->addr_l, 0);
+
+ /*
+ * We don't set the BPTR register here since it already points
+ * to the boot page properly.
+ */
+ mpic_reset_core(hw_cpu);
+
+ /* wait until core is ready... */
+ if (!spin_event_timeout(in_be32(&spin_table->addr_l) == 1,
+ 10000, 100)) {
+ pr_err("%s: timeout waiting for core %d to reset\n",
+ __func__, hw_cpu);
+ ret = -ENOENT;
+ goto out;
+ }
+
+ /* clear the acknowledge status */
+ __secondary_hold_acknowledge = -1;
+ }
+#endif
+ out_be32(&spin_table->pir, hw_cpu);
+ out_be32(&spin_table->addr_l, __pa(__early_start));
if (!ioremappable)
- flush_dcache_range((ulong)bptr_vaddr,
- (ulong)(bptr_vaddr + SIZE_BOOT_ENTRY));
+ flush_dcache_range((ulong)spin_table,
+ (ulong)spin_table + sizeof(struct epapr_spin_table));
/* Wait a bit for the CPU to ack. */
- while ((__secondary_hold_acknowledge != hw_cpu) && (++n < 1000))
- mdelay(1);
+ if (!spin_event_timeout(__secondary_hold_acknowledge == hw_cpu,
+ 10000, 100)) {
+ pr_err("%s: timeout waiting for core %d to ack\n",
+ __func__, hw_cpu);
+ ret = -ENOENT;
+ goto out;
+ }
+out:
#else
smp_generic_kick_cpu(nr);
- out_be64((u64 *)(bptr_vaddr + BOOT_ENTRY_ADDR_UPPER),
- __pa((u64)*((unsigned long long *) generic_secondary_smp_init)));
+ out_be32(&spin_table->pir, hw_cpu);
+ out_be64((u64 *)(&spin_table->addr_h),
+ __pa((u64)*((unsigned long long *)generic_secondary_smp_init)));
if (!ioremappable)
- flush_dcache_range((ulong)bptr_vaddr,
- (ulong)(bptr_vaddr + SIZE_BOOT_ENTRY));
+ flush_dcache_range((ulong)spin_table,
+ (ulong)spin_table + sizeof(struct epapr_spin_table));
#endif
local_irq_restore(flags);
if (ioremappable)
- iounmap(bptr_vaddr);
-
- pr_debug("waited %d msecs for CPU #%d.\n", n, nr);
+ iounmap(spin_table);
- return 0;
+ return ret;
}
struct smp_ops_t smp_85xx_ops = {
.kick_cpu = smp_85xx_kick_cpu,
+#ifdef CONFIG_HOTPLUG_CPU
+ .cpu_disable = generic_cpu_disable,
+ .cpu_die = generic_cpu_die,
+#endif
#ifdef CONFIG_KEXEC
.give_timebase = smp_generic_give_timebase,
.take_timebase = smp_generic_take_timebase,
@@ -218,8 +339,7 @@ static void mpc85xx_smp_machine_kexec(struct kimage *image)
}
#endif /* CONFIG_KEXEC */
-static void __init
-smp_85xx_setup_cpu(int cpu_nr)
+static void __cpuinit smp_85xx_setup_cpu(int cpu_nr)
{
if (smp_85xx_ops.probe == smp_mpic_probe)
mpic_setup_this_cpu();
@@ -228,6 +348,16 @@ smp_85xx_setup_cpu(int cpu_nr)
doorbell_setup_this_cpu();
}
+static const struct of_device_id mpc85xx_smp_guts_ids[] = {
+ { .compatible = "fsl,mpc8572-guts", },
+ { .compatible = "fsl,p1020-guts", },
+ { .compatible = "fsl,p1021-guts", },
+ { .compatible = "fsl,p1022-guts", },
+ { .compatible = "fsl,p1023-guts", },
+ { .compatible = "fsl,p2020-guts", },
+ {},
+};
+
void __init mpc85xx_smp_init(void)
{
struct device_node *np;
@@ -249,6 +379,22 @@ void __init mpc85xx_smp_init(void)
smp_85xx_ops.cause_ipi = doorbell_cause_ipi;
}
+ np = of_find_matching_node(NULL, mpc85xx_smp_guts_ids);
+ if (np) {
+ guts = of_iomap(np, 0);
+ of_node_put(np);
+ if (!guts) {
+ pr_err("%s: Could not map guts node address\n",
+ __func__);
+ return;
+ }
+ smp_85xx_ops.give_timebase = mpc85xx_give_timebase;
+ smp_85xx_ops.take_timebase = mpc85xx_take_timebase;
+#ifdef CONFIG_HOTPLUG_CPU
+ ppc_md.cpu_die = smp_85xx_mach_cpu_die;
+#endif
+ }
+
smp_ops = &smp_85xx_ops;
#ifdef CONFIG_KEXEC
diff --git a/arch/powerpc/platforms/85xx/socrates.c b/arch/powerpc/platforms/85xx/socrates.c
index b9c6daa07b66..ae368e0e1076 100644
--- a/arch/powerpc/platforms/85xx/socrates.c
+++ b/arch/powerpc/platforms/85xx/socrates.c
@@ -66,20 +66,13 @@ static void __init socrates_pic_init(void)
*/
static void __init socrates_setup_arch(void)
{
-#ifdef CONFIG_PCI
- struct device_node *np;
-#endif
-
if (ppc_md.progress)
ppc_md.progress("socrates_setup_arch()", 0);
-#ifdef CONFIG_PCI
- for_each_compatible_node(np, "pci", "fsl,mpc8540-pci")
- fsl_add_bridge(np, 1);
-#endif
+ fsl_pci_assign_primary();
}
-machine_device_initcall(socrates, mpc85xx_common_publish_devices);
+machine_arch_initcall(socrates, mpc85xx_common_publish_devices);
/*
* Called very early, device-tree isn't unflattened
diff --git a/arch/powerpc/platforms/85xx/stx_gp3.c b/arch/powerpc/platforms/85xx/stx_gp3.c
index e0508002b086..6f4939b6309e 100644
--- a/arch/powerpc/platforms/85xx/stx_gp3.c
+++ b/arch/powerpc/platforms/85xx/stx_gp3.c
@@ -60,21 +60,14 @@ static void __init stx_gp3_pic_init(void)
*/
static void __init stx_gp3_setup_arch(void)
{
-#ifdef CONFIG_PCI
- struct device_node *np;
-#endif
-
if (ppc_md.progress)
ppc_md.progress("stx_gp3_setup_arch()", 0);
+ fsl_pci_assign_primary();
+
#ifdef CONFIG_CPM2
cpm2_reset();
#endif
-
-#ifdef CONFIG_PCI
- for_each_compatible_node(np, "pci", "fsl,mpc8540-pci")
- fsl_add_bridge(np, 1);
-#endif
}
static void stx_gp3_show_cpuinfo(struct seq_file *m)
@@ -93,7 +86,7 @@ static void stx_gp3_show_cpuinfo(struct seq_file *m)
seq_printf(m, "PLL setting\t: 0x%x\n", ((phid1 >> 24) & 0x3f));
}
-machine_device_initcall(stx_gp3, mpc85xx_common_publish_devices);
+machine_arch_initcall(stx_gp3, mpc85xx_common_publish_devices);
/*
* Called very early, device-tree isn't unflattened
diff --git a/arch/powerpc/platforms/85xx/tqm85xx.c b/arch/powerpc/platforms/85xx/tqm85xx.c
index 3e70a2035e53..b4e58cdc09a5 100644
--- a/arch/powerpc/platforms/85xx/tqm85xx.c
+++ b/arch/powerpc/platforms/85xx/tqm85xx.c
@@ -59,10 +59,6 @@ static void __init tqm85xx_pic_init(void)
*/
static void __init tqm85xx_setup_arch(void)
{
-#ifdef CONFIG_PCI
- struct device_node *np;
-#endif
-
if (ppc_md.progress)
ppc_md.progress("tqm85xx_setup_arch()", 0);
@@ -70,20 +66,7 @@ static void __init tqm85xx_setup_arch(void)
cpm2_reset();
#endif
-#ifdef CONFIG_PCI
- for_each_node_by_type(np, "pci") {
- if (of_device_is_compatible(np, "fsl,mpc8540-pci") ||
- of_device_is_compatible(np, "fsl,mpc8548-pcie")) {
- struct resource rsrc;
- if (!of_address_to_resource(np, 0, &rsrc)) {
- if ((rsrc.start & 0xfffff) == 0x8000)
- fsl_add_bridge(np, 1);
- else
- fsl_add_bridge(np, 0);
- }
- }
- }
-#endif
+ fsl_pci_assign_primary();
}
static void tqm85xx_show_cpuinfo(struct seq_file *m)
@@ -123,9 +106,9 @@ static void __devinit tqm85xx_ti1520_fixup(struct pci_dev *pdev)
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_TI, PCI_DEVICE_ID_TI_1520,
tqm85xx_ti1520_fixup);
-machine_device_initcall(tqm85xx, mpc85xx_common_publish_devices);
+machine_arch_initcall(tqm85xx, mpc85xx_common_publish_devices);
-static const char *board[] __initdata = {
+static const char * const board[] __initconst = {
"tqc,tqm8540",
"tqc,tqm8541",
"tqc,tqm8548",
diff --git a/arch/powerpc/platforms/85xx/xes_mpc85xx.c b/arch/powerpc/platforms/85xx/xes_mpc85xx.c
index 41c687550ea7..dcbf7e42dce7 100644
--- a/arch/powerpc/platforms/85xx/xes_mpc85xx.c
+++ b/arch/powerpc/platforms/85xx/xes_mpc85xx.c
@@ -111,18 +111,11 @@ static void xes_mpc85xx_fixups(void)
}
}
-#ifdef CONFIG_PCI
-static int primary_phb_addr;
-#endif
-
/*
* Setup the architecture
*/
static void __init xes_mpc85xx_setup_arch(void)
{
-#ifdef CONFIG_PCI
- struct device_node *np;
-#endif
struct device_node *root;
const char *model = "Unknown";
@@ -137,26 +130,14 @@ static void __init xes_mpc85xx_setup_arch(void)
xes_mpc85xx_fixups();
-#ifdef CONFIG_PCI
- for_each_node_by_type(np, "pci") {
- if (of_device_is_compatible(np, "fsl,mpc8540-pci") ||
- of_device_is_compatible(np, "fsl,mpc8548-pcie")) {
- struct resource rsrc;
- of_address_to_resource(np, 0, &rsrc);
- if ((rsrc.start & 0xfffff) == primary_phb_addr)
- fsl_add_bridge(np, 1);
- else
- fsl_add_bridge(np, 0);
- }
- }
-#endif
-
mpc85xx_smp_init();
+
+ fsl_pci_assign_primary();
}
-machine_device_initcall(xes_mpc8572, mpc85xx_common_publish_devices);
-machine_device_initcall(xes_mpc8548, mpc85xx_common_publish_devices);
-machine_device_initcall(xes_mpc8540, mpc85xx_common_publish_devices);
+machine_arch_initcall(xes_mpc8572, mpc85xx_common_publish_devices);
+machine_arch_initcall(xes_mpc8548, mpc85xx_common_publish_devices);
+machine_arch_initcall(xes_mpc8540, mpc85xx_common_publish_devices);
/*
* Called very early, device-tree isn't unflattened
@@ -165,42 +146,21 @@ static int __init xes_mpc8572_probe(void)
{
unsigned long root = of_get_flat_dt_root();
- if (of_flat_dt_is_compatible(root, "xes,MPC8572")) {
-#ifdef CONFIG_PCI
- primary_phb_addr = 0x8000;
-#endif
- return 1;
- } else {
- return 0;
- }
+ return of_flat_dt_is_compatible(root, "xes,MPC8572");
}
static int __init xes_mpc8548_probe(void)
{
unsigned long root = of_get_flat_dt_root();
- if (of_flat_dt_is_compatible(root, "xes,MPC8548")) {
-#ifdef CONFIG_PCI
- primary_phb_addr = 0xb000;
-#endif
- return 1;
- } else {
- return 0;
- }
+ return of_flat_dt_is_compatible(root, "xes,MPC8548");
}
static int __init xes_mpc8540_probe(void)
{
unsigned long root = of_get_flat_dt_root();
- if (of_flat_dt_is_compatible(root, "xes,MPC8540")) {
-#ifdef CONFIG_PCI
- primary_phb_addr = 0xb000;
-#endif
- return 1;
- } else {
- return 0;
- }
+ return of_flat_dt_is_compatible(root, "xes,MPC8540");
}
define_machine(xes_mpc8572) {
diff --git a/arch/powerpc/platforms/86xx/gef_ppc9a.c b/arch/powerpc/platforms/86xx/gef_ppc9a.c
index 563aafa8629c..bf5338754c5a 100644
--- a/arch/powerpc/platforms/86xx/gef_ppc9a.c
+++ b/arch/powerpc/platforms/86xx/gef_ppc9a.c
@@ -73,13 +73,6 @@ static void __init gef_ppc9a_init_irq(void)
static void __init gef_ppc9a_setup_arch(void)
{
struct device_node *regs;
-#ifdef CONFIG_PCI
- struct device_node *np;
-
- for_each_compatible_node(np, "pci", "fsl,mpc8641-pcie") {
- fsl_add_bridge(np, 1);
- }
-#endif
printk(KERN_INFO "GE Intelligent Platforms PPC9A 6U VME SBC\n");
@@ -87,6 +80,8 @@ static void __init gef_ppc9a_setup_arch(void)
mpc86xx_smp_init();
#endif
+ fsl_pci_assign_primary();
+
/* Remap basic board registers */
regs = of_find_compatible_node(NULL, NULL, "gef,ppc9a-fpga-regs");
if (regs) {
@@ -221,6 +216,7 @@ static long __init mpc86xx_time_init(void)
static __initdata struct of_device_id of_bus_ids[] = {
{ .compatible = "simple-bus", },
{ .compatible = "gianfar", },
+ { .compatible = "fsl,mpc8641-pcie", },
{},
};
@@ -231,7 +227,7 @@ static int __init declare_of_platform_devices(void)
return 0;
}
-machine_device_initcall(gef_ppc9a, declare_of_platform_devices);
+machine_arch_initcall(gef_ppc9a, declare_of_platform_devices);
define_machine(gef_ppc9a) {
.name = "GE PPC9A",
diff --git a/arch/powerpc/platforms/86xx/gef_sbc310.c b/arch/powerpc/platforms/86xx/gef_sbc310.c
index cc6a91ae0889..0b7851330a07 100644
--- a/arch/powerpc/platforms/86xx/gef_sbc310.c
+++ b/arch/powerpc/platforms/86xx/gef_sbc310.c
@@ -73,20 +73,14 @@ static void __init gef_sbc310_init_irq(void)
static void __init gef_sbc310_setup_arch(void)
{
struct device_node *regs;
-#ifdef CONFIG_PCI
- struct device_node *np;
-
- for_each_compatible_node(np, "pci", "fsl,mpc8641-pcie") {
- fsl_add_bridge(np, 1);
- }
-#endif
-
printk(KERN_INFO "GE Intelligent Platforms SBC310 6U VPX SBC\n");
#ifdef CONFIG_SMP
mpc86xx_smp_init();
#endif
+ fsl_pci_assign_primary();
+
/* Remap basic board registers */
regs = of_find_compatible_node(NULL, NULL, "gef,fpga-regs");
if (regs) {
@@ -209,6 +203,7 @@ static long __init mpc86xx_time_init(void)
static __initdata struct of_device_id of_bus_ids[] = {
{ .compatible = "simple-bus", },
{ .compatible = "gianfar", },
+ { .compatible = "fsl,mpc8641-pcie", },
{},
};
@@ -219,7 +214,7 @@ static int __init declare_of_platform_devices(void)
return 0;
}
-machine_device_initcall(gef_sbc310, declare_of_platform_devices);
+machine_arch_initcall(gef_sbc310, declare_of_platform_devices);
define_machine(gef_sbc310) {
.name = "GE SBC310",
diff --git a/arch/powerpc/platforms/86xx/gef_sbc610.c b/arch/powerpc/platforms/86xx/gef_sbc610.c
index aead6b337f4a..b9eb174897b1 100644
--- a/arch/powerpc/platforms/86xx/gef_sbc610.c
+++ b/arch/powerpc/platforms/86xx/gef_sbc610.c
@@ -73,13 +73,6 @@ static void __init gef_sbc610_init_irq(void)
static void __init gef_sbc610_setup_arch(void)
{
struct device_node *regs;
-#ifdef CONFIG_PCI
- struct device_node *np;
-
- for_each_compatible_node(np, "pci", "fsl,mpc8641-pcie") {
- fsl_add_bridge(np, 1);
- }
-#endif
printk(KERN_INFO "GE Intelligent Platforms SBC610 6U VPX SBC\n");
@@ -87,6 +80,8 @@ static void __init gef_sbc610_setup_arch(void)
mpc86xx_smp_init();
#endif
+ fsl_pci_assign_primary();
+
/* Remap basic board registers */
regs = of_find_compatible_node(NULL, NULL, "gef,fpga-regs");
if (regs) {
@@ -198,6 +193,7 @@ static long __init mpc86xx_time_init(void)
static __initdata struct of_device_id of_bus_ids[] = {
{ .compatible = "simple-bus", },
{ .compatible = "gianfar", },
+ { .compatible = "fsl,mpc8641-pcie", },
{},
};
@@ -208,7 +204,7 @@ static int __init declare_of_platform_devices(void)
return 0;
}
-machine_device_initcall(gef_sbc610, declare_of_platform_devices);
+machine_arch_initcall(gef_sbc610, declare_of_platform_devices);
define_machine(gef_sbc610) {
.name = "GE SBC610",
diff --git a/arch/powerpc/platforms/86xx/mpc8610_hpcd.c b/arch/powerpc/platforms/86xx/mpc8610_hpcd.c
index 62cd3c555bfb..a817398a56da 100644
--- a/arch/powerpc/platforms/86xx/mpc8610_hpcd.c
+++ b/arch/powerpc/platforms/86xx/mpc8610_hpcd.c
@@ -91,6 +91,9 @@ static struct of_device_id __initdata mpc8610_ids[] = {
{ .compatible = "simple-bus", },
/* So that the DMA channel nodes can be probed individually: */
{ .compatible = "fsl,eloplus-dma", },
+ /* PCI controllers */
+ { .compatible = "fsl,mpc8610-pci", },
+ { .compatible = "fsl,mpc8641-pcie", },
{}
};
@@ -107,7 +110,7 @@ static int __init mpc8610_declare_of_platform_devices(void)
return 0;
}
-machine_device_initcall(mpc86xx_hpcd, mpc8610_declare_of_platform_devices);
+machine_arch_initcall(mpc86xx_hpcd, mpc8610_declare_of_platform_devices);
#if defined(CONFIG_FB_FSL_DIU) || defined(CONFIG_FB_FSL_DIU_MODULE)
@@ -278,25 +281,13 @@ mpc8610hpcd_valid_monitor_port(enum fsl_diu_monitor_port port)
static void __init mpc86xx_hpcd_setup_arch(void)
{
struct resource r;
- struct device_node *np;
unsigned char *pixis;
if (ppc_md.progress)
ppc_md.progress("mpc86xx_hpcd_setup_arch()", 0);
-#ifdef CONFIG_PCI
- for_each_node_by_type(np, "pci") {
- if (of_device_is_compatible(np, "fsl,mpc8610-pci")
- || of_device_is_compatible(np, "fsl,mpc8641-pcie")) {
- struct resource rsrc;
- of_address_to_resource(np, 0, &rsrc);
- if ((rsrc.start & 0xfffff) == 0xa000)
- fsl_add_bridge(np, 1);
- else
- fsl_add_bridge(np, 0);
- }
- }
-#endif
+ fsl_pci_assign_primary();
+
#if defined(CONFIG_FB_FSL_DIU) || defined(CONFIG_FB_FSL_DIU_MODULE)
diu_ops.get_pixel_format = mpc8610hpcd_get_pixel_format;
diu_ops.set_gamma_table = mpc8610hpcd_set_gamma_table;
diff --git a/arch/powerpc/platforms/86xx/mpc86xx_hpcn.c b/arch/powerpc/platforms/86xx/mpc86xx_hpcn.c
index 817245bc0219..e8bf3fae5606 100644
--- a/arch/powerpc/platforms/86xx/mpc86xx_hpcn.c
+++ b/arch/powerpc/platforms/86xx/mpc86xx_hpcn.c
@@ -19,7 +19,6 @@
#include <linux/delay.h>
#include <linux/seq_file.h>
#include <linux/of_platform.h>
-#include <linux/memblock.h>
#include <asm/time.h>
#include <asm/machdep.h>
@@ -51,15 +50,8 @@ extern int uli_exclude_device(struct pci_controller *hose,
static int mpc86xx_exclude_device(struct pci_controller *hose,
u_char bus, u_char devfn)
{
- struct device_node* node;
- struct resource rsrc;
-
- node = hose->dn;
- of_address_to_resource(node, 0, &rsrc);
-
- if ((rsrc.start & 0xfffff) == 0x8000) {
+ if (hose->dn == fsl_pci_primary)
return uli_exclude_device(hose, bus, devfn);
- }
return PCIBIOS_SUCCESSFUL;
}
@@ -69,30 +61,11 @@ static int mpc86xx_exclude_device(struct pci_controller *hose,
static void __init
mpc86xx_hpcn_setup_arch(void)
{
-#ifdef CONFIG_PCI
- struct device_node *np;
- struct pci_controller *hose;
-#endif
- dma_addr_t max = 0xffffffff;
-
if (ppc_md.progress)
ppc_md.progress("mpc86xx_hpcn_setup_arch()", 0);
#ifdef CONFIG_PCI
- for_each_compatible_node(np, "pci", "fsl,mpc8641-pcie") {
- struct resource rsrc;
- of_address_to_resource(np, 0, &rsrc);
- if ((rsrc.start & 0xfffff) == 0x8000)
- fsl_add_bridge(np, 1);
- else
- fsl_add_bridge(np, 0);
- hose = pci_find_hose_for_OF_device(np);
- max = min(max, hose->dma_window_base_cur +
- hose->dma_window_size);
- }
-
ppc_md.pci_exclude_device = mpc86xx_exclude_device;
-
#endif
printk("MPC86xx HPCN board from Freescale Semiconductor\n");
@@ -101,13 +74,9 @@ mpc86xx_hpcn_setup_arch(void)
mpc86xx_smp_init();
#endif
-#ifdef CONFIG_SWIOTLB
- if ((memblock_end_of_DRAM() - 1) > max) {
- ppc_swiotlb_enable = 1;
- set_pci_dma_ops(&swiotlb_dma_ops);
- ppc_md.pci_dma_dev_setup = pci_dma_dev_setup_swiotlb;
- }
-#endif
+ fsl_pci_assign_primary();
+
+ swiotlb_detect_4g();
}
@@ -162,6 +131,7 @@ static __initdata struct of_device_id of_bus_ids[] = {
{ .compatible = "simple-bus", },
{ .compatible = "fsl,srio", },
{ .compatible = "gianfar", },
+ { .compatible = "fsl,mpc8641-pcie", },
{},
};
@@ -171,7 +141,7 @@ static int __init declare_of_platform_devices(void)
return 0;
}
-machine_device_initcall(mpc86xx_hpcn, declare_of_platform_devices);
+machine_arch_initcall(mpc86xx_hpcn, declare_of_platform_devices);
machine_arch_initcall(mpc86xx_hpcn, swiotlb_setup_bus_notifier);
define_machine(mpc86xx_hpcn) {
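
The removed hunk above open-coded the decision to enable swiotlb bounce buffering when RAM extends past the PCI controllers' 32-bit DMA window; the swiotlb_detect_4g() call that replaces it is assumed to encapsulate a similar check. A standalone sketch of that decision, with made-up sizes, purely to show the comparison the deleted code performed:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* Sketch only: enable bounce buffering when the top of RAM lies beyond
 * the highest address the PCI DMA window can reach. */
static bool needs_swiotlb(uint64_t end_of_dram, uint64_t dma_limit)
{
	return (end_of_dram - 1) > dma_limit;
}

int main(void)
{
	uint64_t dma_limit = 0xffffffffULL;	/* 32-bit DMA window */

	printf("2 GiB RAM: %d\n", needs_swiotlb(2ULL << 30, dma_limit));
	printf("8 GiB RAM: %d\n", needs_swiotlb(8ULL << 30, dma_limit));
	return 0;
}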
diff --git a/arch/powerpc/platforms/86xx/sbc8641d.c b/arch/powerpc/platforms/86xx/sbc8641d.c
index e7007d0d949e..b47a8fd0f3d3 100644
--- a/arch/powerpc/platforms/86xx/sbc8641d.c
+++ b/arch/powerpc/platforms/86xx/sbc8641d.c
@@ -38,23 +38,16 @@
static void __init
sbc8641_setup_arch(void)
{
-#ifdef CONFIG_PCI
- struct device_node *np;
-#endif
-
if (ppc_md.progress)
ppc_md.progress("sbc8641_setup_arch()", 0);
-#ifdef CONFIG_PCI
- for_each_compatible_node(np, "pci", "fsl,mpc8641-pcie")
- fsl_add_bridge(np, 0);
-#endif
-
printk("SBC8641 board from Wind River\n");
#ifdef CONFIG_SMP
mpc86xx_smp_init();
#endif
+
+ fsl_pci_assign_primary();
}
@@ -102,6 +95,7 @@ mpc86xx_time_init(void)
static __initdata struct of_device_id of_bus_ids[] = {
{ .compatible = "simple-bus", },
{ .compatible = "gianfar", },
+ { .compatible = "fsl,mpc8641-pcie", },
{},
};
@@ -111,7 +105,7 @@ static int __init declare_of_platform_devices(void)
return 0;
}
-machine_device_initcall(sbc8641, declare_of_platform_devices);
+machine_arch_initcall(sbc8641, declare_of_platform_devices);
define_machine(sbc8641) {
.name = "SBC8641D",
diff --git a/arch/powerpc/platforms/cell/beat.c b/arch/powerpc/platforms/cell/beat.c
index 852592b2b712..affcf566d460 100644
--- a/arch/powerpc/platforms/cell/beat.c
+++ b/arch/powerpc/platforms/cell/beat.c
@@ -136,9 +136,9 @@ ssize_t beat_nvram_get_size(void)
return BEAT_NVRAM_SIZE;
}
-int beat_set_xdabr(unsigned long dabr)
+int beat_set_xdabr(unsigned long dabr, unsigned long dabrx)
{
- if (beat_set_dabr(dabr, DABRX_KERNEL | DABRX_USER))
+ if (beat_set_dabr(dabr, dabrx))
return -1;
return 0;
}
diff --git a/arch/powerpc/platforms/cell/beat.h b/arch/powerpc/platforms/cell/beat.h
index 32c8efcedc80..bfcb8e351ae5 100644
--- a/arch/powerpc/platforms/cell/beat.h
+++ b/arch/powerpc/platforms/cell/beat.h
@@ -32,7 +32,7 @@ void beat_get_rtc_time(struct rtc_time *);
ssize_t beat_nvram_get_size(void);
ssize_t beat_nvram_read(char *, size_t, loff_t *);
ssize_t beat_nvram_write(char *, size_t, loff_t *);
-int beat_set_xdabr(unsigned long);
+int beat_set_xdabr(unsigned long, unsigned long);
void beat_power_save(void);
void beat_kexec_cpu_down(int, int);
diff --git a/arch/powerpc/platforms/cell/beat_htab.c b/arch/powerpc/platforms/cell/beat_htab.c
index 943c9d39aa16..0f6f83988b3d 100644
--- a/arch/powerpc/platforms/cell/beat_htab.c
+++ b/arch/powerpc/platforms/cell/beat_htab.c
@@ -88,7 +88,7 @@ static inline unsigned int beat_read_mask(unsigned hpte_group)
}
static long beat_lpar_hpte_insert(unsigned long hpte_group,
- unsigned long va, unsigned long pa,
+ unsigned long vpn, unsigned long pa,
unsigned long rflags, unsigned long vflags,
int psize, int ssize)
{
@@ -103,7 +103,7 @@ static long beat_lpar_hpte_insert(unsigned long hpte_group,
"rflags=%lx, vflags=%lx, psize=%d)\n",
hpte_group, va, pa, rflags, vflags, psize);
- hpte_v = hpte_encode_v(va, psize, MMU_SEGSIZE_256M) |
+ hpte_v = hpte_encode_v(vpn, psize, MMU_SEGSIZE_256M) |
vflags | HPTE_V_VALID;
hpte_r = hpte_encode_r(pa, psize) | rflags;
@@ -184,14 +184,14 @@ static void beat_lpar_hptab_clear(void)
*/
static long beat_lpar_hpte_updatepp(unsigned long slot,
unsigned long newpp,
- unsigned long va,
+ unsigned long vpn,
int psize, int ssize, int local)
{
unsigned long lpar_rc;
u64 dummy0, dummy1;
unsigned long want_v;
- want_v = hpte_encode_v(va, psize, MMU_SEGSIZE_256M);
+ want_v = hpte_encode_v(vpn, psize, MMU_SEGSIZE_256M);
DBG_LOW(" update: "
"avpnv=%016lx, slot=%016lx, psize: %d, newpp %016lx ... ",
@@ -220,15 +220,15 @@ static long beat_lpar_hpte_updatepp(unsigned long slot,
return 0;
}
-static long beat_lpar_hpte_find(unsigned long va, int psize)
+static long beat_lpar_hpte_find(unsigned long vpn, int psize)
{
unsigned long hash;
unsigned long i, j;
long slot;
unsigned long want_v, hpte_v;
- hash = hpt_hash(va, mmu_psize_defs[psize].shift, MMU_SEGSIZE_256M);
- want_v = hpte_encode_v(va, psize, MMU_SEGSIZE_256M);
+ hash = hpt_hash(vpn, mmu_psize_defs[psize].shift, MMU_SEGSIZE_256M);
+ want_v = hpte_encode_v(vpn, psize, MMU_SEGSIZE_256M);
for (j = 0; j < 2; j++) {
slot = (hash & htab_hash_mask) * HPTES_PER_GROUP;
@@ -255,14 +255,15 @@ static void beat_lpar_hpte_updateboltedpp(unsigned long newpp,
unsigned long ea,
int psize, int ssize)
{
- unsigned long lpar_rc, slot, vsid, va;
+ unsigned long vpn;
+ unsigned long lpar_rc, slot, vsid;
u64 dummy0, dummy1;
vsid = get_kernel_vsid(ea, MMU_SEGSIZE_256M);
- va = (vsid << 28) | (ea & 0x0fffffff);
+ vpn = hpt_vpn(ea, vsid, MMU_SEGSIZE_256M);
raw_spin_lock(&beat_htab_lock);
- slot = beat_lpar_hpte_find(va, psize);
+ slot = beat_lpar_hpte_find(vpn, psize);
BUG_ON(slot == -1);
lpar_rc = beat_write_htab_entry(0, slot, 0, newpp, 0, 7,
@@ -272,7 +273,7 @@ static void beat_lpar_hpte_updateboltedpp(unsigned long newpp,
BUG_ON(lpar_rc != 0);
}
-static void beat_lpar_hpte_invalidate(unsigned long slot, unsigned long va,
+static void beat_lpar_hpte_invalidate(unsigned long slot, unsigned long vpn,
int psize, int ssize, int local)
{
unsigned long want_v;
@@ -282,7 +283,7 @@ static void beat_lpar_hpte_invalidate(unsigned long slot, unsigned long va,
DBG_LOW(" inval : slot=%lx, va=%016lx, psize: %d, local: %d\n",
slot, va, psize, local);
- want_v = hpte_encode_v(va, psize, MMU_SEGSIZE_256M);
+ want_v = hpte_encode_v(vpn, psize, MMU_SEGSIZE_256M);
raw_spin_lock_irqsave(&beat_htab_lock, flags);
dummy1 = beat_lpar_hpte_getword0(slot);
@@ -311,7 +312,7 @@ void __init hpte_init_beat(void)
}
static long beat_lpar_hpte_insert_v3(unsigned long hpte_group,
- unsigned long va, unsigned long pa,
+ unsigned long vpn, unsigned long pa,
unsigned long rflags, unsigned long vflags,
int psize, int ssize)
{
@@ -322,11 +323,11 @@ static long beat_lpar_hpte_insert_v3(unsigned long hpte_group,
return -1;
if (!(vflags & HPTE_V_BOLTED))
- DBG_LOW("hpte_insert(group=%lx, va=%016lx, pa=%016lx, "
+ DBG_LOW("hpte_insert(group=%lx, vpn=%016lx, pa=%016lx, "
"rflags=%lx, vflags=%lx, psize=%d)\n",
- hpte_group, va, pa, rflags, vflags, psize);
+ hpte_group, vpn, pa, rflags, vflags, psize);
- hpte_v = hpte_encode_v(va, psize, MMU_SEGSIZE_256M) |
+ hpte_v = hpte_encode_v(vpn, psize, MMU_SEGSIZE_256M) |
vflags | HPTE_V_VALID;
hpte_r = hpte_encode_r(pa, psize) | rflags;
@@ -364,14 +365,14 @@ static long beat_lpar_hpte_insert_v3(unsigned long hpte_group,
*/
static long beat_lpar_hpte_updatepp_v3(unsigned long slot,
unsigned long newpp,
- unsigned long va,
+ unsigned long vpn,
int psize, int ssize, int local)
{
unsigned long lpar_rc;
unsigned long want_v;
unsigned long pss;
- want_v = hpte_encode_v(va, psize, MMU_SEGSIZE_256M);
+ want_v = hpte_encode_v(vpn, psize, MMU_SEGSIZE_256M);
pss = (psize == MMU_PAGE_4K) ? -1UL : mmu_psize_defs[psize].penc;
DBG_LOW(" update: "
@@ -392,16 +393,16 @@ static long beat_lpar_hpte_updatepp_v3(unsigned long slot,
return 0;
}
-static void beat_lpar_hpte_invalidate_v3(unsigned long slot, unsigned long va,
+static void beat_lpar_hpte_invalidate_v3(unsigned long slot, unsigned long vpn,
int psize, int ssize, int local)
{
unsigned long want_v;
unsigned long lpar_rc;
unsigned long pss;
- DBG_LOW(" inval : slot=%lx, va=%016lx, psize: %d, local: %d\n",
- slot, va, psize, local);
- want_v = hpte_encode_v(va, psize, MMU_SEGSIZE_256M);
+ DBG_LOW(" inval : slot=%lx, vpn=%016lx, psize: %d, local: %d\n",
+ slot, vpn, psize, local);
+ want_v = hpte_encode_v(vpn, psize, MMU_SEGSIZE_256M);
pss = (psize == MMU_PAGE_4K) ? -1UL : mmu_psize_defs[psize].penc;
lpar_rc = beat_invalidate_htab_entry3(0, slot, want_v, pss);
diff --git a/arch/powerpc/platforms/pasemi/iommu.c b/arch/powerpc/platforms/pasemi/iommu.c
index 14943ef01918..7d2d036754b5 100644
--- a/arch/powerpc/platforms/pasemi/iommu.c
+++ b/arch/powerpc/platforms/pasemi/iommu.c
@@ -19,12 +19,12 @@
#undef DEBUG
+#include <linux/memblock.h>
#include <linux/types.h>
#include <linux/spinlock.h>
#include <linux/pci.h>
#include <asm/iommu.h>
#include <asm/machdep.h>
-#include <asm/abs_addr.h>
#include <asm/firmware.h>
#define IOBMAP_PAGE_SHIFT 12
@@ -99,7 +99,7 @@ static int iobmap_build(struct iommu_table *tbl, long index,
ip = ((u32 *)tbl->it_base) + index;
while (npages--) {
- rpn = virt_to_abs(uaddr) >> IOBMAP_PAGE_SHIFT;
+ rpn = __pa(uaddr) >> IOBMAP_PAGE_SHIFT;
*(ip++) = IOBMAP_L2E_V | rpn;
/* invalidate tlb, can be optimized more */
@@ -258,7 +258,7 @@ void __init alloc_iobmap_l2(void)
return;
#endif
/* For 2G space, 8x64 pages (2^21 bytes) is max total l2 size */
- iob_l2_base = (u32 *)abs_to_virt(memblock_alloc_base(1UL<<21, 1UL<<21, 0x80000000));
+ iob_l2_base = (u32 *)__va(memblock_alloc_base(1UL<<21, 1UL<<21, 0x80000000));
printk(KERN_INFO "IOBMAP L2 allocated at: %p\n", iob_l2_base);
}
diff --git a/arch/powerpc/platforms/powernv/pci-ioda.c b/arch/powerpc/platforms/powernv/pci-ioda.c
index 0e7eccc0f88d..471aa3ccd9fd 100644
--- a/arch/powerpc/platforms/powernv/pci-ioda.c
+++ b/arch/powerpc/platforms/powernv/pci-ioda.c
@@ -30,19 +30,10 @@
#include <asm/opal.h>
#include <asm/iommu.h>
#include <asm/tce.h>
-#include <asm/abs_addr.h>
#include "powernv.h"
#include "pci.h"
-struct resource_wrap {
- struct list_head link;
- resource_size_t size;
- resource_size_t align;
- struct pci_dev *dev; /* Set if it's a device */
- struct pci_bus *bus; /* Set if it's a bridge */
-};
-
static int __pe_printk(const char *level, const struct pnv_ioda_pe *pe,
struct va_format *vaf)
{
@@ -78,273 +69,6 @@ define_pe_printk_level(pe_err, KERN_ERR);
define_pe_printk_level(pe_warn, KERN_WARNING);
define_pe_printk_level(pe_info, KERN_INFO);
-
-/* Calculate resource usage & alignment requirement of a single
- * device. This will also assign all resources within the device
- * for a given type starting at 0 for the biggest one and then
- * assigning in decreasing order of size.
- */
-static void __devinit pnv_ioda_calc_dev(struct pci_dev *dev, unsigned int flags,
- resource_size_t *size,
- resource_size_t *align)
-{
- resource_size_t start;
- struct resource *r;
- int i;
-
- pr_devel(" -> CDR %s\n", pci_name(dev));
-
- *size = *align = 0;
-
- /* Clear the resources out and mark them all unset */
- for (i = 0; i <= PCI_ROM_RESOURCE; i++) {
- r = &dev->resource[i];
- if (!(r->flags & flags))
- continue;
- if (r->start) {
- r->end -= r->start;
- r->start = 0;
- }
- r->flags |= IORESOURCE_UNSET;
- }
-
- /* We currently keep all memory resources together, we
- * will handle prefetch & 64-bit separately in the future
- * but for now we stick everybody in M32
- */
- start = 0;
- for (;;) {
- resource_size_t max_size = 0;
- int max_no = -1;
-
- /* Find next biggest resource */
- for (i = 0; i <= PCI_ROM_RESOURCE; i++) {
- r = &dev->resource[i];
- if (!(r->flags & IORESOURCE_UNSET) ||
- !(r->flags & flags))
- continue;
- if (resource_size(r) > max_size) {
- max_size = resource_size(r);
- max_no = i;
- }
- }
- if (max_no < 0)
- break;
- r = &dev->resource[max_no];
- if (max_size > *align)
- *align = max_size;
- *size += max_size;
- r->start = start;
- start += max_size;
- r->end = r->start + max_size - 1;
- r->flags &= ~IORESOURCE_UNSET;
- pr_devel(" -> R%d %016llx..%016llx\n",
- max_no, r->start, r->end);
- }
- pr_devel(" <- CDR %s size=%llx align=%llx\n",
- pci_name(dev), *size, *align);
-}
-
-/* Allocate a resource "wrap" for a given device or bridge and
- * insert it at the right position in the sorted list
- */
-static void __devinit pnv_ioda_add_wrap(struct list_head *list,
- struct pci_bus *bus,
- struct pci_dev *dev,
- resource_size_t size,
- resource_size_t align)
-{
- struct resource_wrap *w1, *w = kzalloc(sizeof(*w), GFP_KERNEL);
-
- w->size = size;
- w->align = align;
- w->dev = dev;
- w->bus = bus;
-
- list_for_each_entry(w1, list, link) {
- if (w1->align < align) {
- list_add_tail(&w->link, &w1->link);
- return;
- }
- }
- list_add_tail(&w->link, list);
-}
-
-/* Offset device resources of a given type */
-static void __devinit pnv_ioda_offset_dev(struct pci_dev *dev,
- unsigned int flags,
- resource_size_t offset)
-{
- struct resource *r;
- int i;
-
- pr_devel(" -> ODR %s [%x] +%016llx\n", pci_name(dev), flags, offset);
-
- for (i = 0; i <= PCI_ROM_RESOURCE; i++) {
- r = &dev->resource[i];
- if (r->flags & flags) {
- dev->resource[i].start += offset;
- dev->resource[i].end += offset;
- }
- }
-
- pr_devel(" <- ODR %s [%x] +%016llx\n", pci_name(dev), flags, offset);
-}
-
-/* Offset bus resources (& all children) of a given type */
-static void __devinit pnv_ioda_offset_bus(struct pci_bus *bus,
- unsigned int flags,
- resource_size_t offset)
-{
- struct resource *r;
- struct pci_dev *dev;
- struct pci_bus *cbus;
- int i;
-
- pr_devel(" -> OBR %s [%x] +%016llx\n",
- bus->self ? pci_name(bus->self) : "root", flags, offset);
-
- pci_bus_for_each_resource(bus, r, i) {
- if (r && (r->flags & flags)) {
- r->start += offset;
- r->end += offset;
- }
- }
- list_for_each_entry(dev, &bus->devices, bus_list)
- pnv_ioda_offset_dev(dev, flags, offset);
- list_for_each_entry(cbus, &bus->children, node)
- pnv_ioda_offset_bus(cbus, flags, offset);
-
- pr_devel(" <- OBR %s [%x]\n",
- bus->self ? pci_name(bus->self) : "root", flags);
-}
-
-/* This is the guts of our IODA resource allocation. This is called
- * recursively for each bus in the system. It calculates all the
- * necessary size and requirements for children and assign them
- * resources such that:
- *
- * - Each function fits in it's own contiguous set of IO/M32
- * segment
- *
- * - All segments behind a P2P bridge are contiguous and obey
- * alignment constraints of those bridges
- */
-static void __devinit pnv_ioda_calc_bus(struct pci_bus *bus, unsigned int flags,
- resource_size_t *size,
- resource_size_t *align)
-{
- struct pci_controller *hose = pci_bus_to_host(bus);
- struct pnv_phb *phb = hose->private_data;
- resource_size_t dev_size, dev_align, start;
- resource_size_t min_align, min_balign;
- struct pci_dev *cdev;
- struct pci_bus *cbus;
- struct list_head head;
- struct resource_wrap *w;
- unsigned int bres;
-
- *size = *align = 0;
-
- pr_devel("-> CBR %s [%x]\n",
- bus->self ? pci_name(bus->self) : "root", flags);
-
- /* Calculate alignment requirements based on the type
- * of resource we are working on
- */
- if (flags & IORESOURCE_IO) {
- bres = 0;
- min_align = phb->ioda.io_segsize;
- min_balign = 0x1000;
- } else {
- bres = 1;
- min_align = phb->ioda.m32_segsize;
- min_balign = 0x100000;
- }
-
- /* Gather all our children resources ordered by alignment */
- INIT_LIST_HEAD(&head);
-
- /* - Busses */
- list_for_each_entry(cbus, &bus->children, node) {
- pnv_ioda_calc_bus(cbus, flags, &dev_size, &dev_align);
- pnv_ioda_add_wrap(&head, cbus, NULL, dev_size, dev_align);
- }
-
- /* - Devices */
- list_for_each_entry(cdev, &bus->devices, bus_list) {
- pnv_ioda_calc_dev(cdev, flags, &dev_size, &dev_align);
- /* Align them to segment size */
- if (dev_align < min_align)
- dev_align = min_align;
- pnv_ioda_add_wrap(&head, NULL, cdev, dev_size, dev_align);
- }
- if (list_empty(&head))
- goto empty;
-
- /* Now we can do two things: assign offsets to them within that
- * level and get our total alignment & size requirements. The
- * assignment algorithm is going to be uber-trivial for now, we
- * can try to be smarter later at filling out holes.
- */
- if (bus->self) {
- /* No offset for downstream bridges */
- start = 0;
- } else {
- /* Offset from the root */
- if (flags & IORESOURCE_IO)
- /* Don't hand out IO 0 */
- start = hose->io_resource.start + 0x1000;
- else
- start = hose->mem_resources[0].start;
- }
- while(!list_empty(&head)) {
- w = list_first_entry(&head, struct resource_wrap, link);
- list_del(&w->link);
- if (w->size) {
- if (start) {
- start = ALIGN(start, w->align);
- if (w->dev)
- pnv_ioda_offset_dev(w->dev,flags,start);
- else if (w->bus)
- pnv_ioda_offset_bus(w->bus,flags,start);
- }
- if (w->align > *align)
- *align = w->align;
- }
- start += w->size;
- kfree(w);
- }
- *size = start;
-
- /* Align and setup bridge resources */
- *align = max_t(resource_size_t, *align,
- max_t(resource_size_t, min_align, min_balign));
- *size = ALIGN(*size,
- max_t(resource_size_t, min_align, min_balign));
- empty:
- /* Only setup P2P's, not the PHB itself */
- if (bus->self) {
- struct resource *res = bus->resource[bres];
-
- if (WARN_ON(res == NULL))
- return;
-
- /*
- * FIXME: We should probably export and call
- * pci_bridge_check_ranges() to properly re-initialize
- * the PCI portion of the flags here, and to detect
- * what the bridge actually supports.
- */
- res->start = 0;
- res->flags = (*size) ? flags : 0;
- res->end = (*size) ? (*size - 1) : 0;
- }
-
- pr_devel("<- CBR %s [%x] *size=%016llx *align=%016llx\n",
- bus->self ? pci_name(bus->self) : "root", flags,*size,*align);
-}
-
static struct pci_dn *pnv_ioda_get_pdn(struct pci_dev *dev)
{
struct device_node *np;
@@ -355,172 +79,6 @@ static struct pci_dn *pnv_ioda_get_pdn(struct pci_dev *dev)
return PCI_DN(np);
}
-static void __devinit pnv_ioda_setup_pe_segments(struct pci_dev *dev)
-{
- struct pci_controller *hose = pci_bus_to_host(dev->bus);
- struct pnv_phb *phb = hose->private_data;
- struct pci_dn *pdn = pnv_ioda_get_pdn(dev);
- unsigned int pe, i;
- resource_size_t pos;
- struct resource io_res;
- struct resource m32_res;
- struct pci_bus_region region;
- int rc;
-
- /* Anything not referenced in the device-tree gets PE#0 */
- pe = pdn ? pdn->pe_number : 0;
-
- /* Calculate the device min/max */
- io_res.start = m32_res.start = (resource_size_t)-1;
- io_res.end = m32_res.end = 0;
- io_res.flags = IORESOURCE_IO;
- m32_res.flags = IORESOURCE_MEM;
-
- for (i = 0; i <= PCI_ROM_RESOURCE; i++) {
- struct resource *r = NULL;
- if (dev->resource[i].flags & IORESOURCE_IO)
- r = &io_res;
- if (dev->resource[i].flags & IORESOURCE_MEM)
- r = &m32_res;
- if (!r)
- continue;
- if (dev->resource[i].start < r->start)
- r->start = dev->resource[i].start;
- if (dev->resource[i].end > r->end)
- r->end = dev->resource[i].end;
- }
-
- /* Setup IO segments */
- if (io_res.start < io_res.end) {
- pcibios_resource_to_bus(dev, &region, &io_res);
- pos = region.start;
- i = pos / phb->ioda.io_segsize;
- while(i < phb->ioda.total_pe && pos <= region.end) {
- if (phb->ioda.io_segmap[i]) {
- pr_err("%s: Trying to use IO seg #%d which is"
- " already used by PE# %d\n",
- pci_name(dev), i,
- phb->ioda.io_segmap[i]);
- /* XXX DO SOMETHING TO DISABLE DEVICE ? */
- break;
- }
- phb->ioda.io_segmap[i] = pe;
- rc = opal_pci_map_pe_mmio_window(phb->opal_id, pe,
- OPAL_IO_WINDOW_TYPE,
- 0, i);
- if (rc != OPAL_SUCCESS) {
- pr_err("%s: OPAL error %d setting up mapping"
- " for IO seg# %d\n",
- pci_name(dev), rc, i);
- /* XXX DO SOMETHING TO DISABLE DEVICE ? */
- break;
- }
- pos += phb->ioda.io_segsize;
- i++;
- };
- }
-
- /* Setup M32 segments */
- if (m32_res.start < m32_res.end) {
- pcibios_resource_to_bus(dev, &region, &m32_res);
- pos = region.start;
- i = pos / phb->ioda.m32_segsize;
- while(i < phb->ioda.total_pe && pos <= region.end) {
- if (phb->ioda.m32_segmap[i]) {
- pr_err("%s: Trying to use M32 seg #%d which is"
- " already used by PE# %d\n",
- pci_name(dev), i,
- phb->ioda.m32_segmap[i]);
- /* XXX DO SOMETHING TO DISABLE DEVICE ? */
- break;
- }
- phb->ioda.m32_segmap[i] = pe;
- rc = opal_pci_map_pe_mmio_window(phb->opal_id, pe,
- OPAL_M32_WINDOW_TYPE,
- 0, i);
- if (rc != OPAL_SUCCESS) {
- pr_err("%s: OPAL error %d setting up mapping"
- " for M32 seg# %d\n",
- pci_name(dev), rc, i);
- /* XXX DO SOMETHING TO DISABLE DEVICE ? */
- break;
- }
- pos += phb->ioda.m32_segsize;
- i++;
- }
- }
-}
-
-/* Check if a resource still fits in the total IO or M32 range
- * for a given PHB
- */
-static int __devinit pnv_ioda_resource_fit(struct pci_controller *hose,
- struct resource *r)
-{
- struct resource *bounds;
-
- if (r->flags & IORESOURCE_IO)
- bounds = &hose->io_resource;
- else if (r->flags & IORESOURCE_MEM)
- bounds = &hose->mem_resources[0];
- else
- return 1;
-
- if (r->start >= bounds->start && r->end <= bounds->end)
- return 1;
- r->flags = 0;
- return 0;
-}
-
-static void __devinit pnv_ioda_update_resources(struct pci_bus *bus)
-{
- struct pci_controller *hose = pci_bus_to_host(bus);
- struct pci_bus *cbus;
- struct pci_dev *cdev;
- unsigned int i;
-
- /* We used to clear all device enables here. However it looks like
- * clearing MEM enable causes Obsidian (IPR SCS) to go bonkers,
- * and shoot fatal errors to the PHB which in turns fences itself
- * and we can't recover from that ... yet. So for now, let's leave
- * the enables as-is and hope for the best.
- */
-
- /* Check if bus resources fit in our IO or M32 range */
- for (i = 0; bus->self && (i < 2); i++) {
- struct resource *r = bus->resource[i];
- if (r && !pnv_ioda_resource_fit(hose, r))
- pr_err("%s: Bus %d resource %d disabled, no room\n",
- pci_name(bus->self), bus->number, i);
- }
-
- /* Update self if it's not a PHB */
- if (bus->self)
- pci_setup_bridge(bus);
-
- /* Update child devices */
- list_for_each_entry(cdev, &bus->devices, bus_list) {
- /* Check if resource fits, if not, disabled it */
- for (i = 0; i <= PCI_ROM_RESOURCE; i++) {
- struct resource *r = &cdev->resource[i];
- if (!pnv_ioda_resource_fit(hose, r))
- pr_err("%s: Resource %d disabled, no room\n",
- pci_name(cdev), i);
- }
-
- /* Assign segments */
- pnv_ioda_setup_pe_segments(cdev);
-
- /* Update HW BARs */
- for (i = 0; i <= PCI_ROM_RESOURCE; i++)
- pci_update_resource(cdev, i);
- }
-
- /* Update child busses */
- list_for_each_entry(cbus, &bus->children, node)
- pnv_ioda_update_resources(cbus);
-}
-
static int __devinit pnv_ioda_alloc_pe(struct pnv_phb *phb)
{
unsigned long pe;
@@ -548,7 +106,7 @@ static void __devinit pnv_ioda_free_pe(struct pnv_phb *phb, int pe)
* but in the meantime, we need to protect them to avoid warnings
*/
#ifdef CONFIG_PCI_MSI
-static struct pnv_ioda_pe * __devinit __pnv_ioda_get_one_pe(struct pci_dev *dev)
+static struct pnv_ioda_pe * __devinit pnv_ioda_get_pe(struct pci_dev *dev)
{
struct pci_controller *hose = pci_bus_to_host(dev->bus);
struct pnv_phb *phb = hose->private_data;
@@ -560,19 +118,6 @@ static struct pnv_ioda_pe * __devinit __pnv_ioda_get_one_pe(struct pci_dev *dev)
return NULL;
return &phb->ioda.pe_array[pdn->pe_number];
}
-
-static struct pnv_ioda_pe * __devinit pnv_ioda_get_pe(struct pci_dev *dev)
-{
- struct pnv_ioda_pe *pe = __pnv_ioda_get_one_pe(dev);
-
- while (!pe && dev->bus->self) {
- dev = dev->bus->self;
- pe = __pnv_ioda_get_one_pe(dev);
- if (pe)
- pe = pe->bus_pe;
- }
- return pe;
-}
#endif /* CONFIG_PCI_MSI */
static int __devinit pnv_ioda_configure_pe(struct pnv_phb *phb,
@@ -589,7 +134,11 @@ static int __devinit pnv_ioda_configure_pe(struct pnv_phb *phb,
dcomp = OPAL_IGNORE_RID_DEVICE_NUMBER;
fcomp = OPAL_IGNORE_RID_FUNCTION_NUMBER;
parent = pe->pbus->self;
- count = pe->pbus->busn_res.end - pe->pbus->busn_res.start + 1;
+ if (pe->flags & PNV_IODA_PE_BUS_ALL)
+ count = pe->pbus->busn_res.end - pe->pbus->busn_res.start + 1;
+ else
+ count = 1;
+
switch(count) {
case 1: bcomp = OpalPciBusAll; break;
case 2: bcomp = OpalPciBus7Bits; break;
@@ -666,13 +215,13 @@ static void __devinit pnv_ioda_link_pe_by_weight(struct pnv_phb *phb,
{
struct pnv_ioda_pe *lpe;
- list_for_each_entry(lpe, &phb->ioda.pe_list, link) {
+ list_for_each_entry(lpe, &phb->ioda.pe_dma_list, dma_link) {
if (lpe->dma_weight < pe->dma_weight) {
- list_add_tail(&pe->link, &lpe->link);
+ list_add_tail(&pe->dma_link, &lpe->dma_link);
return;
}
}
- list_add_tail(&pe->link, &phb->ioda.pe_list);
+ list_add_tail(&pe->dma_link, &phb->ioda.pe_dma_list);
}
static unsigned int pnv_ioda_dma_weight(struct pci_dev *dev)
@@ -699,6 +248,7 @@ static unsigned int pnv_ioda_dma_weight(struct pci_dev *dev)
return 10;
}
+#if 0
static struct pnv_ioda_pe * __devinit pnv_ioda_setup_dev_PE(struct pci_dev *dev)
{
struct pci_controller *hose = pci_bus_to_host(dev->bus);
@@ -767,6 +317,7 @@ static struct pnv_ioda_pe * __devinit pnv_ioda_setup_dev_PE(struct pci_dev *dev)
return pe;
}
+#endif /* Useful for SRIOV case */
static void pnv_ioda_setup_same_PE(struct pci_bus *bus, struct pnv_ioda_pe *pe)
{
@@ -784,34 +335,33 @@ static void pnv_ioda_setup_same_PE(struct pci_bus *bus, struct pnv_ioda_pe *pe)
pdn->pcidev = dev;
pdn->pe_number = pe->pe_number;
pe->dma_weight += pnv_ioda_dma_weight(dev);
- if (dev->subordinate)
+ if ((pe->flags & PNV_IODA_PE_BUS_ALL) && dev->subordinate)
pnv_ioda_setup_same_PE(dev->subordinate, pe);
}
}
-static void __devinit pnv_ioda_setup_bus_PE(struct pci_dev *dev,
- struct pnv_ioda_pe *ppe)
+/*
+ * There are two types of PCI-bus-sensitive PEs: one that comprises a
+ * single PCI bus, and another that contains the primary PCI bus plus its
+ * subordinate PCI devices and buses. The second type of PE normally
+ * originates from a PCIe-to-PCI bridge or a PLX switch downstream port.
+ */
+static void __devinit pnv_ioda_setup_bus_PE(struct pci_bus *bus, int all)
{
- struct pci_controller *hose = pci_bus_to_host(dev->bus);
+ struct pci_controller *hose = pci_bus_to_host(bus);
struct pnv_phb *phb = hose->private_data;
- struct pci_bus *bus = dev->subordinate;
struct pnv_ioda_pe *pe;
int pe_num;
- if (!bus) {
- pr_warning("%s: Bridge without a subordinate bus !\n",
- pci_name(dev));
- return;
- }
pe_num = pnv_ioda_alloc_pe(phb);
if (pe_num == IODA_INVALID_PE) {
- pr_warning("%s: Not enough PE# available, disabling bus\n",
- pci_name(dev));
+ pr_warning("%s: Not enough PE# available for PCI bus %04x:%02x\n",
+ __func__, pci_domain_nr(bus), bus->number);
return;
}
pe = &phb->ioda.pe_array[pe_num];
- ppe->bus_pe = pe;
+ pe->flags = (all ? PNV_IODA_PE_BUS_ALL : PNV_IODA_PE_BUS);
pe->pbus = bus;
pe->pdev = NULL;
pe->tce32_seg = -1;
@@ -819,8 +369,12 @@ static void __devinit pnv_ioda_setup_bus_PE(struct pci_dev *dev,
pe->rid = bus->busn_res.start << 8;
pe->dma_weight = 0;
- pe_info(pe, "Secondary busses %pR associated with PE\n",
- &bus->busn_res);
+ if (all)
+ pe_info(pe, "Secondary bus %d..%d associated with PE#%d\n",
+ bus->busn_res.start, bus->busn_res.end, pe_num);
+ else
+ pe_info(pe, "Secondary bus %d associated with PE#%d\n",
+ bus->busn_res.start, pe_num);
if (pnv_ioda_configure_pe(phb, pe)) {
/* XXX What do we do here ? */
@@ -833,6 +387,9 @@ static void __devinit pnv_ioda_setup_bus_PE(struct pci_dev *dev,
/* Associate it with all child devices */
pnv_ioda_setup_same_PE(bus, pe);
+	/* Put the PE on the list */
+ list_add_tail(&pe->list, &phb->ioda.pe_list);
+
/* Account for one DMA PE if at least one DMA capable device exist
* below the bridge
*/
@@ -848,17 +405,33 @@ static void __devinit pnv_ioda_setup_bus_PE(struct pci_dev *dev,
static void __devinit pnv_ioda_setup_PEs(struct pci_bus *bus)
{
struct pci_dev *dev;
- struct pnv_ioda_pe *pe;
+
+ pnv_ioda_setup_bus_PE(bus, 0);
list_for_each_entry(dev, &bus->devices, bus_list) {
- pe = pnv_ioda_setup_dev_PE(dev);
- if (pe == NULL)
- continue;
- /* Leaving the PCIe domain ... single PE# */
- if (pci_pcie_type(dev) == PCI_EXP_TYPE_PCI_BRIDGE)
- pnv_ioda_setup_bus_PE(dev, pe);
- else if (dev->subordinate)
- pnv_ioda_setup_PEs(dev->subordinate);
+ if (dev->subordinate) {
+ if (pci_pcie_type(dev) == PCI_EXP_TYPE_PCI_BRIDGE)
+ pnv_ioda_setup_bus_PE(dev->subordinate, 1);
+ else
+ pnv_ioda_setup_PEs(dev->subordinate);
+ }
+ }
+}
+
+/*
+ * Configure PEs so that the downstream PCI buses and devices
+ * get their associated PE#. Unfortunately, we haven't figured
+ * out how to identify PLX bridges yet, so for now we simply put
+ * the bus behind the root port, together with its subordinate
+ * devices and buses, into one PE. This policy is expected to
+ * change as soon as PLX bridges can be detected correctly.
+ */
+static void __devinit pnv_pci_ioda_setup_PEs(void)
+{
+ struct pci_controller *hose, *tmp;
+
+ list_for_each_entry_safe(hose, tmp, &hose_list, list_node) {
+ pnv_ioda_setup_PEs(hose->bus);
}
}
@@ -1000,7 +573,7 @@ static void __devinit pnv_ioda_setup_dma(struct pnv_phb *phb)
remaining = phb->ioda.tce32_count;
tw = phb->ioda.dma_weight;
base = 0;
- list_for_each_entry(pe, &phb->ioda.pe_list, link) {
+ list_for_each_entry(pe, &phb->ioda.pe_dma_list, dma_link) {
if (!pe->dma_weight)
continue;
if (!remaining) {
@@ -1109,36 +682,115 @@ static void pnv_pci_init_ioda_msis(struct pnv_phb *phb)
static void pnv_pci_init_ioda_msis(struct pnv_phb *phb) { }
#endif /* CONFIG_PCI_MSI */
-/* This is the starting point of our IODA specific resource
- * allocation process
+/*
+ * This function is supposed to be called per PE, from top to
+ * bottom, so that the I/O or MMIO segments assigned to a
+ * parent PE can be overridden by its child PEs if necessary.
*/
-static void __devinit pnv_pci_ioda_fixup_phb(struct pci_controller *hose)
+static void __devinit pnv_ioda_setup_pe_seg(struct pci_controller *hose,
+ struct pnv_ioda_pe *pe)
{
- resource_size_t size, align;
- struct pci_bus *child;
+ struct pnv_phb *phb = hose->private_data;
+ struct pci_bus_region region;
+ struct resource *res;
+ int i, index;
+ int rc;
- /* Associate PEs per functions */
- pnv_ioda_setup_PEs(hose->bus);
+ /*
+	 * NOTE: We only care about PCI-bus-based PEs for now.
+	 * PCI-device-based PEs, for example SR-IOV VFs, still
+	 * need to be figured out later.
+ */
+ BUG_ON(!(pe->flags & (PNV_IODA_PE_BUS | PNV_IODA_PE_BUS_ALL)));
- /* Calculate all resources */
- pnv_ioda_calc_bus(hose->bus, IORESOURCE_IO, &size, &align);
- pnv_ioda_calc_bus(hose->bus, IORESOURCE_MEM, &size, &align);
+ pci_bus_for_each_resource(pe->pbus, res, i) {
+ if (!res || !res->flags ||
+ res->start > res->end)
+ continue;
- /* Apply then to HW */
- pnv_ioda_update_resources(hose->bus);
+ if (res->flags & IORESOURCE_IO) {
+ region.start = res->start - phb->ioda.io_pci_base;
+ region.end = res->end - phb->ioda.io_pci_base;
+ index = region.start / phb->ioda.io_segsize;
+
+ while (index < phb->ioda.total_pe &&
+ region.start <= region.end) {
+ phb->ioda.io_segmap[index] = pe->pe_number;
+ rc = opal_pci_map_pe_mmio_window(phb->opal_id,
+ pe->pe_number, OPAL_IO_WINDOW_TYPE, 0, index);
+ if (rc != OPAL_SUCCESS) {
+ pr_err("%s: OPAL error %d when mapping IO "
+ "segment #%d to PE#%d\n",
+ __func__, rc, index, pe->pe_number);
+ break;
+ }
+
+ region.start += phb->ioda.io_segsize;
+ index++;
+ }
+ } else if (res->flags & IORESOURCE_MEM) {
+ region.start = res->start -
+ hose->pci_mem_offset -
+ phb->ioda.m32_pci_base;
+ region.end = res->end -
+ hose->pci_mem_offset -
+ phb->ioda.m32_pci_base;
+ index = region.start / phb->ioda.m32_segsize;
+
+ while (index < phb->ioda.total_pe &&
+ region.start <= region.end) {
+ phb->ioda.m32_segmap[index] = pe->pe_number;
+ rc = opal_pci_map_pe_mmio_window(phb->opal_id,
+ pe->pe_number, OPAL_M32_WINDOW_TYPE, 0, index);
+ if (rc != OPAL_SUCCESS) {
+ pr_err("%s: OPAL error %d when mapping M32 "
+ "segment#%d to PE#%d",
+ __func__, rc, index, pe->pe_number);
+ break;
+ }
+
+ region.start += phb->ioda.m32_segsize;
+ index++;
+ }
+ }
+ }
+}
- /* Setup DMA */
- pnv_ioda_setup_dma(hose->private_data);
+static void __devinit pnv_pci_ioda_setup_seg(void)
+{
+ struct pci_controller *tmp, *hose;
+ struct pnv_phb *phb;
+ struct pnv_ioda_pe *pe;
- /* Configure PCI Express settings */
- list_for_each_entry(child, &hose->bus->children, node) {
- struct pci_dev *self = child->self;
- if (!self)
- continue;
- pcie_bus_configure_settings(child, self->pcie_mpss);
+ list_for_each_entry_safe(hose, tmp, &hose_list, list_node) {
+ phb = hose->private_data;
+ list_for_each_entry(pe, &phb->ioda.pe_list, list) {
+ pnv_ioda_setup_pe_seg(hose, pe);
+ }
+ }
+}
+
+static void __devinit pnv_pci_ioda_setup_DMA(void)
+{
+ struct pci_controller *hose, *tmp;
+ struct pnv_phb *phb;
+
+ list_for_each_entry_safe(hose, tmp, &hose_list, list_node) {
+ pnv_ioda_setup_dma(hose->private_data);
+
+ /* Mark the PHB initialization done */
+ phb = hose->private_data;
+ phb->initialized = 1;
}
}
+static void __devinit pnv_pci_ioda_fixup(void)
+{
+ pnv_pci_ioda_setup_PEs();
+ pnv_pci_ioda_setup_seg();
+ pnv_pci_ioda_setup_DMA();
+}
+
/*
* Returns the alignment for I/O or memory windows for P2P
* bridges. That actually depends on how PEs are segmented.
@@ -1182,10 +834,22 @@ static resource_size_t pnv_pci_window_alignment(struct pci_bus *bus,
*/
static int __devinit pnv_pci_enable_device_hook(struct pci_dev *dev)
{
- struct pci_dn *pdn = pnv_ioda_get_pdn(dev);
+ struct pci_controller *hose = pci_bus_to_host(dev->bus);
+ struct pnv_phb *phb = hose->private_data;
+ struct pci_dn *pdn;
+	/* The function may be called before the PEs have been
+	 * created yet, for example during resource reassignment
+	 * in the PCI probe period. Just skip the check if the
+	 * PEs aren't ready.
+ */
+ if (!phb->initialized)
+ return 0;
+
+ pdn = pnv_ioda_get_pdn(dev);
if (!pdn || pdn->pe_number == IODA_INVALID_PE)
return -EINVAL;
+
return 0;
}
@@ -1276,9 +940,9 @@ void __init pnv_pci_init_ioda1_phb(struct device_node *np)
/* Allocate aux data & arrays */
size = _ALIGN_UP(phb->ioda.total_pe / 8, sizeof(unsigned long));
m32map_off = size;
- size += phb->ioda.total_pe;
+ size += phb->ioda.total_pe * sizeof(phb->ioda.m32_segmap[0]);
iomap_off = size;
- size += phb->ioda.total_pe;
+ size += phb->ioda.total_pe * sizeof(phb->ioda.io_segmap[0]);
pemap_off = size;
size += phb->ioda.total_pe * sizeof(struct pnv_ioda_pe);
aux = alloc_bootmem(size);
@@ -1289,6 +953,7 @@ void __init pnv_pci_init_ioda1_phb(struct device_node *np)
phb->ioda.pe_array = aux + pemap_off;
set_bit(0, phb->ioda.pe_alloc);
+ INIT_LIST_HEAD(&phb->ioda.pe_dma_list);
INIT_LIST_HEAD(&phb->ioda.pe_list);
/* Calculate how many 32-bit TCE segments we have */
@@ -1337,15 +1002,17 @@ void __init pnv_pci_init_ioda1_phb(struct device_node *np)
/* Setup MSI support */
pnv_pci_init_ioda_msis(phb);
- /* We set both PCI_PROBE_ONLY and PCI_REASSIGN_ALL_RSRC. This is an
- * odd combination which essentially means that we skip all resource
- * fixups and assignments in the generic code, and do it all
- * ourselves here
+ /*
+ * We pass the PCI probe flag PCI_REASSIGN_ALL_RSRC here
+	 * to let the PCI core do resource assignment. The PCI core
+	 * is expected to apply correct I/O and MMIO alignment to the
+	 * P2P bridge BARs so that each PCI bus (excluding the child
+	 * P2P bridges) can form an individual PE.
*/
- ppc_md.pcibios_fixup_phb = pnv_pci_ioda_fixup_phb;
+ ppc_md.pcibios_fixup = pnv_pci_ioda_fixup;
ppc_md.pcibios_enable_device_hook = pnv_pci_enable_device_hook;
ppc_md.pcibios_window_alignment = pnv_pci_window_alignment;
- pci_add_flags(PCI_PROBE_ONLY | PCI_REASSIGN_ALL_RSRC);
+ pci_add_flags(PCI_REASSIGN_ALL_RSRC);
/* Reset IODA tables to a clean state */
rc = opal_pci_reset(phb_id, OPAL_PCI_IODA_TABLE_RESET, OPAL_ASSERT_RESET);
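
The new pnv_ioda_setup_pe_seg() above maps every IO/M32 segment covered by a PE's bus windows to that PE by dividing the window-relative offset by the segment size and walking segment by segment. A standalone sketch of that index walk, with hypothetical window and segment sizes (the real values come from phb->ioda.io_segsize and m32_segsize); not kernel code:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint64_t segsize = 0x100000;		   /* hypothetical 1 MiB segments */
	uint64_t start = 0x200000, end = 0x4fffff; /* hypothetical PE window */
	int pe_number = 3;			   /* hypothetical PE# */

	/* Walk the window segment by segment, as the kernel loop does,
	 * recording which PE owns each segment index. */
	for (uint64_t pos = start; pos <= end; pos += segsize)
		printf("segment %llu -> PE#%d\n",
		       (unsigned long long)(pos / segsize), pe_number);
	return 0;
}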
diff --git a/arch/powerpc/platforms/powernv/pci-p5ioc2.c b/arch/powerpc/platforms/powernv/pci-p5ioc2.c
index 264967770c3a..6b4bef4e9d82 100644
--- a/arch/powerpc/platforms/powernv/pci-p5ioc2.c
+++ b/arch/powerpc/platforms/powernv/pci-p5ioc2.c
@@ -30,7 +30,6 @@
#include <asm/opal.h>
#include <asm/iommu.h>
#include <asm/tce.h>
-#include <asm/abs_addr.h>
#include "powernv.h"
#include "pci.h"
diff --git a/arch/powerpc/platforms/powernv/pci.c b/arch/powerpc/platforms/powernv/pci.c
index be3cfc5ceabb..c01688a1a741 100644
--- a/arch/powerpc/platforms/powernv/pci.c
+++ b/arch/powerpc/platforms/powernv/pci.c
@@ -30,7 +30,6 @@
#include <asm/opal.h>
#include <asm/iommu.h>
#include <asm/tce.h>
-#include <asm/abs_addr.h>
#include <asm/firmware.h>
#include "powernv.h"
@@ -447,6 +446,11 @@ static void pnv_tce_free(struct iommu_table *tbl, long index, long npages)
pnv_tce_invalidate(tbl, tces, tcep - 1);
}
+static unsigned long pnv_tce_get(struct iommu_table *tbl, long index)
+{
+ return ((u64 *)tbl->it_base)[index - tbl->it_offset];
+}
+
void pnv_pci_setup_iommu_table(struct iommu_table *tbl,
void *tce_mem, u64 tce_size,
u64 dma_offset)
@@ -597,6 +601,7 @@ void __init pnv_pci_init(void)
ppc_md.pci_dma_dev_setup = pnv_pci_dma_dev_setup;
ppc_md.tce_build = pnv_tce_build;
ppc_md.tce_free = pnv_tce_free;
+ ppc_md.tce_get = pnv_tce_get;
ppc_md.pci_probe_mode = pnv_pci_probe_mode;
set_pci_dma_ops(&dma_iommu_ops);
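
The new pnv_tce_get() above reads a TCE back out of the table by indexing from it_base after subtracting it_offset, presumably because callers pass a bus-page index that starts at it_offset rather than at zero. A minimal standalone sketch of that offset-based lookup with invented values; toy_table and toy_get are hypothetical names, not kernel API:

#include <stdint.h>
#include <stdio.h>

struct toy_table {
	uint64_t *base;		/* backing array */
	uint64_t offset;	/* first valid external index */
};

static uint64_t toy_get(struct toy_table *tbl, uint64_t index)
{
	/* External index space starts at tbl->offset, storage at 0 */
	return tbl->base[index - tbl->offset];
}

int main(void)
{
	uint64_t entries[4] = { 0x1000, 0x2000, 0x3000, 0x4000 };
	struct toy_table tbl = { .base = entries, .offset = 16 };

	printf("entry at index 18: %#llx\n",
	       (unsigned long long)toy_get(&tbl, 18));
	return 0;
}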
diff --git a/arch/powerpc/platforms/powernv/pci.h b/arch/powerpc/platforms/powernv/pci.h
index 8bc479634643..7cfb7c883deb 100644
--- a/arch/powerpc/platforms/powernv/pci.h
+++ b/arch/powerpc/platforms/powernv/pci.h
@@ -17,9 +17,14 @@ enum pnv_phb_model {
};
#define PNV_PCI_DIAG_BUF_SIZE 4096
+#define PNV_IODA_PE_DEV (1 << 0) /* PE has single PCI device */
+#define PNV_IODA_PE_BUS (1 << 1) /* PE has primary PCI bus */
+#define PNV_IODA_PE_BUS_ALL (1 << 2) /* PE has subordinate buses */
/* Data associated with a PE, including IOMMU tracking etc.. */
struct pnv_ioda_pe {
+ unsigned long flags;
+
/* A PE can be associated with a single device or an
* entire bus (& children). In the former case, pdev
* is populated, in the later case, pbus is.
@@ -40,11 +45,6 @@ struct pnv_ioda_pe {
*/
unsigned int dma_weight;
- /* This is a PCI-E -> PCI-X bridge, this points to the
- * corresponding bus PE
- */
- struct pnv_ioda_pe *bus_pe;
-
/* "Base" iommu table, ie, 4K TCEs, 32-bit DMA */
int tce32_seg;
int tce32_segcount;
@@ -59,7 +59,8 @@ struct pnv_ioda_pe {
int mve_number;
/* Link in list of PE#s */
- struct list_head link;
+ struct list_head dma_link;
+ struct list_head list;
};
struct pnv_phb {
@@ -68,6 +69,7 @@ struct pnv_phb {
enum pnv_phb_model model;
u64 opal_id;
void __iomem *regs;
+ int initialized;
spinlock_t lock;
#ifdef CONFIG_PCI_MSI
@@ -107,6 +109,11 @@ struct pnv_phb {
unsigned int *io_segmap;
struct pnv_ioda_pe *pe_array;
+	/* List of used PEs, kept in
+	 * the order they were created
+ */
+ struct list_head pe_list;
+
/* Reverse map of PEs, will have to extend if
* we are to support more than 256 PEs, indexed
* bus { bus, devfn }
@@ -125,7 +132,7 @@ struct pnv_phb {
/* Sorted list of used PE's, sorted at
* boot for resource allocation purposes
*/
- struct list_head pe_list;
+ struct list_head pe_dma_list;
} ioda;
};
diff --git a/arch/powerpc/platforms/ps3/htab.c b/arch/powerpc/platforms/ps3/htab.c
index 3124cf791ebb..d00d7b0a3bda 100644
--- a/arch/powerpc/platforms/ps3/htab.c
+++ b/arch/powerpc/platforms/ps3/htab.c
@@ -43,7 +43,7 @@ enum ps3_lpar_vas_id {
static DEFINE_SPINLOCK(ps3_htab_lock);
-static long ps3_hpte_insert(unsigned long hpte_group, unsigned long va,
+static long ps3_hpte_insert(unsigned long hpte_group, unsigned long vpn,
unsigned long pa, unsigned long rflags, unsigned long vflags,
int psize, int ssize)
{
@@ -61,7 +61,7 @@ static long ps3_hpte_insert(unsigned long hpte_group, unsigned long va,
*/
vflags &= ~HPTE_V_SECONDARY;
- hpte_v = hpte_encode_v(va, psize, ssize) | vflags | HPTE_V_VALID;
+ hpte_v = hpte_encode_v(vpn, psize, ssize) | vflags | HPTE_V_VALID;
hpte_r = hpte_encode_r(ps3_mm_phys_to_lpar(pa), psize) | rflags;
spin_lock_irqsave(&ps3_htab_lock, flags);
@@ -75,8 +75,8 @@ static long ps3_hpte_insert(unsigned long hpte_group, unsigned long va,
if (result) {
/* all entries bolted !*/
- pr_info("%s:result=%d va=%lx pa=%lx ix=%lx v=%llx r=%llx\n",
- __func__, result, va, pa, hpte_group, hpte_v, hpte_r);
+ pr_info("%s:result=%d vpn=%lx pa=%lx ix=%lx v=%llx r=%llx\n",
+ __func__, result, vpn, pa, hpte_group, hpte_v, hpte_r);
BUG();
}
@@ -107,7 +107,7 @@ static long ps3_hpte_remove(unsigned long hpte_group)
}
static long ps3_hpte_updatepp(unsigned long slot, unsigned long newpp,
- unsigned long va, int psize, int ssize, int local)
+ unsigned long vpn, int psize, int ssize, int local)
{
int result;
u64 hpte_v, want_v, hpte_rs;
@@ -115,7 +115,7 @@ static long ps3_hpte_updatepp(unsigned long slot, unsigned long newpp,
unsigned long flags;
long ret;
- want_v = hpte_encode_v(va, psize, ssize);
+ want_v = hpte_encode_v(vpn, psize, ssize);
spin_lock_irqsave(&ps3_htab_lock, flags);
@@ -125,8 +125,8 @@ static long ps3_hpte_updatepp(unsigned long slot, unsigned long newpp,
&hpte_rs);
if (result) {
- pr_info("%s: res=%d read va=%lx slot=%lx psize=%d\n",
- __func__, result, va, slot, psize);
+ pr_info("%s: res=%d read vpn=%lx slot=%lx psize=%d\n",
+ __func__, result, vpn, slot, psize);
BUG();
}
@@ -159,7 +159,7 @@ static void ps3_hpte_updateboltedpp(unsigned long newpp, unsigned long ea,
panic("ps3_hpte_updateboltedpp() not implemented");
}
-static void ps3_hpte_invalidate(unsigned long slot, unsigned long va,
+static void ps3_hpte_invalidate(unsigned long slot, unsigned long vpn,
int psize, int ssize, int local)
{
unsigned long flags;
@@ -170,8 +170,8 @@ static void ps3_hpte_invalidate(unsigned long slot, unsigned long va,
result = lv1_write_htab_entry(PS3_LPAR_VAS_ID_CURRENT, slot, 0, 0);
if (result) {
- pr_info("%s: res=%d va=%lx slot=%lx psize=%d\n",
- __func__, result, va, slot, psize);
+ pr_info("%s: res=%d vpn=%lx slot=%lx psize=%d\n",
+ __func__, result, vpn, slot, psize);
BUG();
}
diff --git a/arch/powerpc/platforms/ps3/setup.c b/arch/powerpc/platforms/ps3/setup.c
index 2d664c5a83b0..3f509f86432c 100644
--- a/arch/powerpc/platforms/ps3/setup.c
+++ b/arch/powerpc/platforms/ps3/setup.c
@@ -184,11 +184,15 @@ early_param("ps3flash", early_parse_ps3flash);
#define prealloc_ps3flash_bounce_buffer() do { } while (0)
#endif
-static int ps3_set_dabr(unsigned long dabr)
+static int ps3_set_dabr(unsigned long dabr, unsigned long dabrx)
{
- enum {DABR_USER = 1, DABR_KERNEL = 2,};
+ /* Have to set at least one bit in the DABRX */
+ if (dabrx == 0 && dabr == 0)
+ dabrx = DABRX_USER;
+	/* The hypervisor only allows us to set BTI, kernel and user */
+ dabrx &= DABRX_BTI | DABRX_KERNEL | DABRX_USER;
- return lv1_set_dabr(dabr, DABR_KERNEL | DABR_USER) ? -1 : 0;
+ return lv1_set_dabr(dabr, dabrx) ? -1 : 0;
}
static void __init ps3_setup_arch(void)
diff --git a/arch/powerpc/platforms/pseries/Makefile b/arch/powerpc/platforms/pseries/Makefile
index c222189f5bb2..890622b87c8f 100644
--- a/arch/powerpc/platforms/pseries/Makefile
+++ b/arch/powerpc/platforms/pseries/Makefile
@@ -6,8 +6,9 @@ obj-y := lpar.o hvCall.o nvram.o reconfig.o \
firmware.o power.o dlpar.o mobility.o
obj-$(CONFIG_SMP) += smp.o
obj-$(CONFIG_SCANLOG) += scanlog.o
-obj-$(CONFIG_EEH) += eeh.o eeh_dev.o eeh_cache.o eeh_driver.o \
- eeh_event.o eeh_sysfs.o eeh_pseries.o
+obj-$(CONFIG_EEH) += eeh.o eeh_pe.o eeh_dev.o eeh_cache.o \
+ eeh_driver.o eeh_event.o eeh_sysfs.o \
+ eeh_pseries.o
obj-$(CONFIG_KEXEC) += kexec.o
obj-$(CONFIG_PCI) += pci.o pci_dlpar.o
obj-$(CONFIG_PSERIES_MSI) += msi.o
diff --git a/arch/powerpc/platforms/pseries/eeh.c b/arch/powerpc/platforms/pseries/eeh.c
index ecd394cf34e6..9a04322b1736 100644
--- a/arch/powerpc/platforms/pseries/eeh.c
+++ b/arch/powerpc/platforms/pseries/eeh.c
@@ -92,6 +92,20 @@ struct eeh_ops *eeh_ops = NULL;
int eeh_subsystem_enabled;
EXPORT_SYMBOL(eeh_subsystem_enabled);
+/*
+ * EEH probe mode support. The intention is to support EEH on
+ * multiple platforms. Some platforms, like pSeries, enumerate PCI
+ * devices from the device tree, while other platforms, like powernv,
+ * probe PCI devices from hardware. The flag is used to distinguish that.
+ * In addition, struct eeh_ops::probe is invoked for a particular
+ * OF node or PCI device so that the corresponding PE is created
+ * there.
+ */
+int eeh_probe_mode;
+
+/* Global EEH mutex */
+DEFINE_MUTEX(eeh_mutex);
+
/* Lock to avoid races due to multiple reports of an error */
static DEFINE_RAW_SPINLOCK(confirm_error_lock);
@@ -204,22 +218,12 @@ static size_t eeh_gather_pci_data(struct eeh_dev *edev, char * buf, size_t len)
}
}
- /* Gather status on devices under the bridge */
- if (dev->class >> 16 == PCI_BASE_CLASS_BRIDGE) {
- struct device_node *child;
-
- for_each_child_of_node(dn, child) {
- if (of_node_to_eeh_dev(child))
- n += eeh_gather_pci_data(of_node_to_eeh_dev(child), buf+n, len-n);
- }
- }
-
return n;
}
/**
* eeh_slot_error_detail - Generate combined log including driver log and error log
- * @edev: device to report error log for
+ * @pe: EEH PE
* @severity: temporary or permanent error log
*
* This routine should be called to generate the combined log, which
@@ -227,17 +231,22 @@ static size_t eeh_gather_pci_data(struct eeh_dev *edev, char * buf, size_t len)
* out from the config space of the corresponding PCI device, while
* the error log is fetched through platform dependent function call.
*/
-void eeh_slot_error_detail(struct eeh_dev *edev, int severity)
+void eeh_slot_error_detail(struct eeh_pe *pe, int severity)
{
size_t loglen = 0;
- pci_regs_buf[0] = 0;
+ struct eeh_dev *edev;
- eeh_pci_enable(edev, EEH_OPT_THAW_MMIO);
- eeh_ops->configure_bridge(eeh_dev_to_of_node(edev));
- eeh_restore_bars(edev);
- loglen = eeh_gather_pci_data(edev, pci_regs_buf, EEH_PCI_REGS_LOG_LEN);
+ eeh_pci_enable(pe, EEH_OPT_THAW_MMIO);
+ eeh_ops->configure_bridge(pe);
+ eeh_pe_restore_bars(pe);
- eeh_ops->get_log(eeh_dev_to_of_node(edev), severity, pci_regs_buf, loglen);
+ pci_regs_buf[0] = 0;
+ eeh_pe_for_each_dev(pe, edev) {
+ loglen += eeh_gather_pci_data(edev, pci_regs_buf,
+ EEH_PCI_REGS_LOG_LEN);
+ }
+
+ eeh_ops->get_log(pe, severity, pci_regs_buf, loglen);
}
/**
@@ -261,126 +270,8 @@ static inline unsigned long eeh_token_to_phys(unsigned long token)
}
/**
- * eeh_find_device_pe - Retrieve the PE for the given device
- * @dn: device node
- *
- * Return the PE under which this device lies
- */
-struct device_node *eeh_find_device_pe(struct device_node *dn)
-{
- while (dn->parent && of_node_to_eeh_dev(dn->parent) &&
- (of_node_to_eeh_dev(dn->parent)->mode & EEH_MODE_SUPPORTED)) {
- dn = dn->parent;
- }
- return dn;
-}
-
-/**
- * __eeh_mark_slot - Mark all child devices as failed
- * @parent: parent device
- * @mode_flag: failure flag
- *
- * Mark all devices that are children of this device as failed.
- * Mark the device driver too, so that it can see the failure
- * immediately; this is critical, since some drivers poll
- * status registers in interrupts ... If a driver is polling,
- * and the slot is frozen, then the driver can deadlock in
- * an interrupt context, which is bad.
- */
-static void __eeh_mark_slot(struct device_node *parent, int mode_flag)
-{
- struct device_node *dn;
-
- for_each_child_of_node(parent, dn) {
- if (of_node_to_eeh_dev(dn)) {
- /* Mark the pci device driver too */
- struct pci_dev *dev = of_node_to_eeh_dev(dn)->pdev;
-
- of_node_to_eeh_dev(dn)->mode |= mode_flag;
-
- if (dev && dev->driver)
- dev->error_state = pci_channel_io_frozen;
-
- __eeh_mark_slot(dn, mode_flag);
- }
- }
-}
-
-/**
- * eeh_mark_slot - Mark the indicated device and its children as failed
- * @dn: parent device
- * @mode_flag: failure flag
- *
- * Mark the indicated device and its child devices as failed.
- * The device drivers are marked as failed as well.
- */
-void eeh_mark_slot(struct device_node *dn, int mode_flag)
-{
- struct pci_dev *dev;
- dn = eeh_find_device_pe(dn);
-
- /* Back up one, since config addrs might be shared */
- if (!pcibios_find_pci_bus(dn) && of_node_to_eeh_dev(dn->parent))
- dn = dn->parent;
-
- of_node_to_eeh_dev(dn)->mode |= mode_flag;
-
- /* Mark the pci device too */
- dev = of_node_to_eeh_dev(dn)->pdev;
- if (dev)
- dev->error_state = pci_channel_io_frozen;
-
- __eeh_mark_slot(dn, mode_flag);
-}
-
-/**
- * __eeh_clear_slot - Clear failure flag for the child devices
- * @parent: parent device
- * @mode_flag: flag to be cleared
- *
- * Clear failure flag for the child devices.
- */
-static void __eeh_clear_slot(struct device_node *parent, int mode_flag)
-{
- struct device_node *dn;
-
- for_each_child_of_node(parent, dn) {
- if (of_node_to_eeh_dev(dn)) {
- of_node_to_eeh_dev(dn)->mode &= ~mode_flag;
- of_node_to_eeh_dev(dn)->check_count = 0;
- __eeh_clear_slot(dn, mode_flag);
- }
- }
-}
-
-/**
- * eeh_clear_slot - Clear failure flag for the indicated device and its children
- * @dn: parent device
- * @mode_flag: flag to be cleared
- *
- * Clear failure flag for the indicated device and its children.
- */
-void eeh_clear_slot(struct device_node *dn, int mode_flag)
-{
- unsigned long flags;
- raw_spin_lock_irqsave(&confirm_error_lock, flags);
-
- dn = eeh_find_device_pe(dn);
-
- /* Back up one, since config addrs might be shared */
- if (!pcibios_find_pci_bus(dn) && of_node_to_eeh_dev(dn->parent))
- dn = dn->parent;
-
- of_node_to_eeh_dev(dn)->mode &= ~mode_flag;
- of_node_to_eeh_dev(dn)->check_count = 0;
- __eeh_clear_slot(dn, mode_flag);
- raw_spin_unlock_irqrestore(&confirm_error_lock, flags);
-}
-
-/**
- * eeh_dn_check_failure - Check if all 1's data is due to EEH slot freeze
- * @dn: device node
- * @dev: pci device, if known
+ * eeh_dev_check_failure - Check if all 1's data is due to EEH slot freeze
+ * @edev: eeh device
*
* Check for an EEH failure for the given device node. Call this
* routine if the result of a read was all 0xff's and you want to
@@ -392,11 +283,13 @@ void eeh_clear_slot(struct device_node *dn, int mode_flag)
*
* It is safe to call this routine in an interrupt context.
*/
-int eeh_dn_check_failure(struct device_node *dn, struct pci_dev *dev)
+int eeh_dev_check_failure(struct eeh_dev *edev)
{
int ret;
unsigned long flags;
- struct eeh_dev *edev;
+ struct device_node *dn;
+ struct pci_dev *dev;
+ struct eeh_pe *pe;
int rc = 0;
const char *location;
@@ -405,23 +298,23 @@ int eeh_dn_check_failure(struct device_node *dn, struct pci_dev *dev)
if (!eeh_subsystem_enabled)
return 0;
- if (!dn) {
+ if (!edev) {
eeh_stats.no_dn++;
return 0;
}
- dn = eeh_find_device_pe(dn);
- edev = of_node_to_eeh_dev(dn);
+ dn = eeh_dev_to_of_node(edev);
+ dev = eeh_dev_to_pci_dev(edev);
+ pe = edev->pe;
/* Access to IO BARs might get this far and still not want checking. */
- if (!(edev->mode & EEH_MODE_SUPPORTED) ||
- edev->mode & EEH_MODE_NOCHECK) {
+ if (!pe) {
eeh_stats.ignored_check++;
- pr_debug("EEH: Ignored check (%x) for %s %s\n",
- edev->mode, eeh_pci_name(dev), dn->full_name);
+ pr_debug("EEH: Ignored check for %s %s\n",
+ eeh_pci_name(dev), dn->full_name);
return 0;
}
- if (!edev->config_addr && !edev->pe_config_addr) {
+ if (!pe->addr && !pe->config_addr) {
eeh_stats.no_cfg_addr++;
return 0;
}
@@ -434,13 +327,13 @@ int eeh_dn_check_failure(struct device_node *dn, struct pci_dev *dev)
*/
raw_spin_lock_irqsave(&confirm_error_lock, flags);
rc = 1;
- if (edev->mode & EEH_MODE_ISOLATED) {
- edev->check_count++;
- if (edev->check_count % EEH_MAX_FAILS == 0) {
+ if (pe->state & EEH_PE_ISOLATED) {
+ pe->check_count++;
+ if (pe->check_count % EEH_MAX_FAILS == 0) {
location = of_get_property(dn, "ibm,loc-code", NULL);
printk(KERN_ERR "EEH: %d reads ignored for recovering device at "
"location=%s driver=%s pci addr=%s\n",
- edev->check_count, location,
+ pe->check_count, location,
eeh_driver_name(dev), eeh_pci_name(dev));
printk(KERN_ERR "EEH: Might be infinite loop in %s driver\n",
eeh_driver_name(dev));
@@ -456,7 +349,7 @@ int eeh_dn_check_failure(struct device_node *dn, struct pci_dev *dev)
* function zero of a multi-function device.
* In any case they must share a common PHB.
*/
- ret = eeh_ops->get_state(dn, NULL);
+ ret = eeh_ops->get_state(pe, NULL);
/* Note that config-io to empty slots may fail;
* they are empty when they don't have children.
@@ -469,7 +362,7 @@ int eeh_dn_check_failure(struct device_node *dn, struct pci_dev *dev)
(ret & (EEH_STATE_MMIO_ACTIVE | EEH_STATE_DMA_ACTIVE)) ==
(EEH_STATE_MMIO_ACTIVE | EEH_STATE_DMA_ACTIVE)) {
eeh_stats.false_positives++;
- edev->false_positives ++;
+ pe->false_positives++;
rc = 0;
goto dn_unlock;
}
@@ -480,10 +373,10 @@ int eeh_dn_check_failure(struct device_node *dn, struct pci_dev *dev)
* with other functions on this device, and functions under
* bridges.
*/
- eeh_mark_slot(dn, EEH_MODE_ISOLATED);
+ eeh_pe_state_mark(pe, EEH_PE_ISOLATED);
raw_spin_unlock_irqrestore(&confirm_error_lock, flags);
- eeh_send_failure_event(edev);
+ eeh_send_failure_event(pe);
/* Most EEH events are due to device driver bugs. Having
* a stack trace will help the device-driver authors figure
@@ -497,7 +390,7 @@ dn_unlock:
return rc;
}
-EXPORT_SYMBOL_GPL(eeh_dn_check_failure);
+EXPORT_SYMBOL_GPL(eeh_dev_check_failure);
/**
* eeh_check_failure - Check if all 1's data is due to EEH slot freeze
@@ -514,21 +407,19 @@ EXPORT_SYMBOL_GPL(eeh_dn_check_failure);
unsigned long eeh_check_failure(const volatile void __iomem *token, unsigned long val)
{
unsigned long addr;
- struct pci_dev *dev;
- struct device_node *dn;
+ struct eeh_dev *edev;
/* Finding the phys addr + pci device; this is pretty quick. */
addr = eeh_token_to_phys((unsigned long __force) token);
- dev = pci_addr_cache_get_device(addr);
- if (!dev) {
+ edev = eeh_addr_cache_get_dev(addr);
+ if (!edev) {
eeh_stats.no_device++;
return val;
}
- dn = pci_device_to_OF_node(dev);
- eeh_dn_check_failure(dn, dev);
+ eeh_dev_check_failure(edev);
- pci_dev_put(dev);
+ pci_dev_put(eeh_dev_to_pci_dev(edev));
return val;
}
@@ -537,23 +428,22 @@ EXPORT_SYMBOL(eeh_check_failure);
/**
* eeh_pci_enable - Enable MMIO or DMA transfers for this slot
- * @edev: pci device node
+ * @pe: EEH PE
*
* This routine should be called to reenable frozen MMIO or DMA
* so that it would work correctly again. It's useful while doing
* recovery or log collection on the indicated device.
*/
-int eeh_pci_enable(struct eeh_dev *edev, int function)
+int eeh_pci_enable(struct eeh_pe *pe, int function)
{
int rc;
- struct device_node *dn = eeh_dev_to_of_node(edev);
- rc = eeh_ops->set_option(dn, function);
+ rc = eeh_ops->set_option(pe, function);
if (rc)
- printk(KERN_WARNING "EEH: Unexpected state change %d, err=%d dn=%s\n",
- function, rc, dn->full_name);
+ pr_warning("%s: Unexpected state change %d on PHB#%d-PE#%x, err=%d\n",
+ __func__, function, pe->phb->global_number, pe->addr, rc);
- rc = eeh_ops->wait_state(dn, PCI_BUS_RESET_WAIT_MSEC);
+ rc = eeh_ops->wait_state(pe, PCI_BUS_RESET_WAIT_MSEC);
if (rc > 0 && (rc & EEH_STATE_MMIO_ENABLED) &&
(function == EEH_OPT_THAW_MMIO))
return 0;
@@ -571,17 +461,24 @@ int eeh_pci_enable(struct eeh_dev *edev, int function)
*/
int pcibios_set_pcie_reset_state(struct pci_dev *dev, enum pcie_reset_state state)
{
- struct device_node *dn = pci_device_to_OF_node(dev);
+ struct eeh_dev *edev = pci_dev_to_eeh_dev(dev);
+ struct eeh_pe *pe = edev->pe;
+
+ if (!pe) {
+ pr_err("%s: No PE found on PCI device %s\n",
+ __func__, pci_name(dev));
+ return -EINVAL;
+ }
switch (state) {
case pcie_deassert_reset:
- eeh_ops->reset(dn, EEH_RESET_DEACTIVATE);
+ eeh_ops->reset(pe, EEH_RESET_DEACTIVATE);
break;
case pcie_hot_reset:
- eeh_ops->reset(dn, EEH_RESET_HOT);
+ eeh_ops->reset(pe, EEH_RESET_HOT);
break;
case pcie_warm_reset:
- eeh_ops->reset(dn, EEH_RESET_FUNDAMENTAL);
+ eeh_ops->reset(pe, EEH_RESET_FUNDAMENTAL);
break;
default:
return -EINVAL;
@@ -591,66 +488,37 @@ int pcibios_set_pcie_reset_state(struct pci_dev *dev, enum pcie_reset_state stat
}
/**
- * __eeh_set_pe_freset - Check the required reset for child devices
- * @parent: parent device
- * @freset: return value
- *
- * Each device might have its preferred reset type: fundamental or
- * hot reset. The routine is used to collect the information from
- * the child devices so that they could be reset accordingly.
- */
-void __eeh_set_pe_freset(struct device_node *parent, unsigned int *freset)
-{
- struct device_node *dn;
-
- for_each_child_of_node(parent, dn) {
- if (of_node_to_eeh_dev(dn)) {
- struct pci_dev *dev = of_node_to_eeh_dev(dn)->pdev;
-
- if (dev && dev->driver)
- *freset |= dev->needs_freset;
-
- __eeh_set_pe_freset(dn, freset);
- }
- }
-}
-
-/**
- * eeh_set_pe_freset - Check the required reset for the indicated device and its children
- * @dn: parent device
- * @freset: return value
+ * eeh_set_dev_freset - Check the required reset for the indicated device
+ * @data: EEH device
+ * @flag: return value
*
* Each device might have its preferred reset type: fundamental or
 * hot reset. The routine is used to collect the information for
 * the indicated device and its children so that they can be
 * reset properly.
*/
-void eeh_set_pe_freset(struct device_node *dn, unsigned int *freset)
+static void *eeh_set_dev_freset(void *data, void *flag)
{
struct pci_dev *dev;
- dn = eeh_find_device_pe(dn);
-
- /* Back up one, since config addrs might be shared */
- if (!pcibios_find_pci_bus(dn) && of_node_to_eeh_dev(dn->parent))
- dn = dn->parent;
+ unsigned int *freset = (unsigned int *)flag;
+ struct eeh_dev *edev = (struct eeh_dev *)data;
- dev = of_node_to_eeh_dev(dn)->pdev;
+ dev = eeh_dev_to_pci_dev(edev);
if (dev)
*freset |= dev->needs_freset;
- __eeh_set_pe_freset(dn, freset);
+ return NULL;
}
/**
* eeh_reset_pe_once - Assert the pci #RST line for 1/4 second
- * @edev: pci device node to be reset.
+ * @pe: EEH PE
*
* Assert the PCI #RST line for 1/4 second.
*/
-static void eeh_reset_pe_once(struct eeh_dev *edev)
+static void eeh_reset_pe_once(struct eeh_pe *pe)
{
unsigned int freset = 0;
- struct device_node *dn = eeh_dev_to_of_node(edev);
/* Determine type of EEH reset required for
* Partitionable Endpoint, a hot-reset (1)
@@ -658,12 +526,12 @@ static void eeh_reset_pe_once(struct eeh_dev *edev)
* A fundamental reset required by any device under
* Partitionable Endpoint trumps hot-reset.
*/
- eeh_set_pe_freset(dn, &freset);
+ eeh_pe_dev_traverse(pe, eeh_set_dev_freset, &freset);
if (freset)
- eeh_ops->reset(dn, EEH_RESET_FUNDAMENTAL);
+ eeh_ops->reset(pe, EEH_RESET_FUNDAMENTAL);
else
- eeh_ops->reset(dn, EEH_RESET_HOT);
+ eeh_ops->reset(pe, EEH_RESET_HOT);
/* The PCI bus requires that the reset be held high for at least
 * 100 milliseconds. We wait a bit longer 'just in case'.
@@ -675,9 +543,9 @@ static void eeh_reset_pe_once(struct eeh_dev *edev)
* pci slot reset line is dropped. Make sure we don't miss
* these, and clear the flag now.
*/
- eeh_clear_slot(dn, EEH_MODE_ISOLATED);
+ eeh_pe_state_clear(pe, EEH_PE_ISOLATED);
- eeh_ops->reset(dn, EEH_RESET_DEACTIVATE);
+ eeh_ops->reset(pe, EEH_RESET_DEACTIVATE);
/* After a PCI slot has been reset, the PCI Express spec requires
* a 1.5 second idle time for the bus to stabilize, before starting
@@ -689,116 +557,36 @@ static void eeh_reset_pe_once(struct eeh_dev *edev)
/**
* eeh_reset_pe - Reset the indicated PE
- * @edev: PCI device associated EEH device
+ * @pe: EEH PE
*
 * This routine should be called to reset the indicated device, including
* PE. A PE might include multiple PCI devices and sometimes PCI bridges
* might be involved as well.
*/
-int eeh_reset_pe(struct eeh_dev *edev)
+int eeh_reset_pe(struct eeh_pe *pe)
{
int i, rc;
- struct device_node *dn = eeh_dev_to_of_node(edev);
/* Take three shots at resetting the bus */
for (i=0; i<3; i++) {
- eeh_reset_pe_once(edev);
+ eeh_reset_pe_once(pe);
- rc = eeh_ops->wait_state(dn, PCI_BUS_RESET_WAIT_MSEC);
+ rc = eeh_ops->wait_state(pe, PCI_BUS_RESET_WAIT_MSEC);
if (rc == (EEH_STATE_MMIO_ACTIVE | EEH_STATE_DMA_ACTIVE))
return 0;
if (rc < 0) {
- printk(KERN_ERR "EEH: unrecoverable slot failure %s\n",
- dn->full_name);
+ pr_err("%s: Unrecoverable slot failure on PHB#%d-PE#%x",
+ __func__, pe->phb->global_number, pe->addr);
return -1;
}
- printk(KERN_ERR "EEH: bus reset %d failed on slot %s, rc=%d\n",
- i+1, dn->full_name, rc);
+ pr_err("EEH: bus reset %d failed on PHB#%d-PE#%x, rc=%d\n",
+ i+1, pe->phb->global_number, pe->addr, rc);
}
return -1;
}
-/** Save and restore of PCI BARs
- *
- * Although firmware will set up BARs during boot, it doesn't
- * set up device BAR's after a device reset, although it will,
- * if requested, set up bridge configuration. Thus, we need to
- * configure the PCI devices ourselves.
- */
-
-/**
- * eeh_restore_one_device_bars - Restore the Base Address Registers for one device
- * @edev: PCI device associated EEH device
- *
- * Loads the PCI configuration space base address registers,
- * the expansion ROM base address, the latency timer, and etc.
- * from the saved values in the device node.
- */
-static inline void eeh_restore_one_device_bars(struct eeh_dev *edev)
-{
- int i;
- u32 cmd;
- struct device_node *dn = eeh_dev_to_of_node(edev);
-
- if (!edev->phb)
- return;
-
- for (i=4; i<10; i++) {
- eeh_ops->write_config(dn, i*4, 4, edev->config_space[i]);
- }
-
- /* 12 == Expansion ROM Address */
- eeh_ops->write_config(dn, 12*4, 4, edev->config_space[12]);
-
-#define BYTE_SWAP(OFF) (8*((OFF)/4)+3-(OFF))
-#define SAVED_BYTE(OFF) (((u8 *)(edev->config_space))[BYTE_SWAP(OFF)])
-
- eeh_ops->write_config(dn, PCI_CACHE_LINE_SIZE, 1,
- SAVED_BYTE(PCI_CACHE_LINE_SIZE));
-
- eeh_ops->write_config(dn, PCI_LATENCY_TIMER, 1,
- SAVED_BYTE(PCI_LATENCY_TIMER));
-
- /* max latency, min grant, interrupt pin and line */
- eeh_ops->write_config(dn, 15*4, 4, edev->config_space[15]);
-
- /* Restore PERR & SERR bits, some devices require it,
- * don't touch the other command bits
- */
- eeh_ops->read_config(dn, PCI_COMMAND, 4, &cmd);
- if (edev->config_space[1] & PCI_COMMAND_PARITY)
- cmd |= PCI_COMMAND_PARITY;
- else
- cmd &= ~PCI_COMMAND_PARITY;
- if (edev->config_space[1] & PCI_COMMAND_SERR)
- cmd |= PCI_COMMAND_SERR;
- else
- cmd &= ~PCI_COMMAND_SERR;
- eeh_ops->write_config(dn, PCI_COMMAND, 4, cmd);
-}
-
-/**
- * eeh_restore_bars - Restore the PCI config space info
- * @edev: EEH device
- *
- * This routine performs a recursive walk to the children
- * of this device as well.
- */
-void eeh_restore_bars(struct eeh_dev *edev)
-{
- struct device_node *dn;
- if (!edev)
- return;
-
- if ((edev->mode & EEH_MODE_SUPPORTED) && !IS_BRIDGE(edev->class_code))
- eeh_restore_one_device_bars(edev);
-
- for_each_child_of_node(eeh_dev_to_of_node(edev), dn)
- eeh_restore_bars(of_node_to_eeh_dev(dn));
-}
-
/**
* eeh_save_bars - Save device bars
* @edev: PCI device associated EEH device
@@ -808,7 +596,7 @@ void eeh_restore_bars(struct eeh_dev *edev)
* PCI devices are added individually; but, for the restore,
* an entire slot is reset at a time.
*/
-static void eeh_save_bars(struct eeh_dev *edev)
+void eeh_save_bars(struct eeh_dev *edev)
{
int i;
struct device_node *dn;
@@ -822,102 +610,6 @@ static void eeh_save_bars(struct eeh_dev *edev)
}
/**
- * eeh_early_enable - Early enable EEH on the indicated device
- * @dn: device node
- * @data: BUID
- *
- * Enable EEH functionality on the specified PCI device. The function
- * is expected to be called before real PCI probing is done. However,
- * the PHBs have been initialized at this point.
- */
-static void *eeh_early_enable(struct device_node *dn, void *data)
-{
- int ret;
- const u32 *class_code = of_get_property(dn, "class-code", NULL);
- const u32 *vendor_id = of_get_property(dn, "vendor-id", NULL);
- const u32 *device_id = of_get_property(dn, "device-id", NULL);
- const u32 *regs;
- int enable;
- struct eeh_dev *edev = of_node_to_eeh_dev(dn);
-
- edev->class_code = 0;
- edev->mode = 0;
- edev->check_count = 0;
- edev->freeze_count = 0;
- edev->false_positives = 0;
-
- if (!of_device_is_available(dn))
- return NULL;
-
- /* Ignore bad nodes. */
- if (!class_code || !vendor_id || !device_id)
- return NULL;
-
- /* There is nothing to check on PCI to ISA bridges */
- if (dn->type && !strcmp(dn->type, "isa")) {
- edev->mode |= EEH_MODE_NOCHECK;
- return NULL;
- }
- edev->class_code = *class_code;
-
- /* Ok... see if this device supports EEH. Some do, some don't,
- * and the only way to find out is to check each and every one.
- */
- regs = of_get_property(dn, "reg", NULL);
- if (regs) {
- /* First register entry is addr (00BBSS00) */
- /* Try to enable eeh */
- ret = eeh_ops->set_option(dn, EEH_OPT_ENABLE);
-
- enable = 0;
- if (ret == 0) {
- edev->config_addr = regs[0];
-
- /* If the newer, better, ibm,get-config-addr-info is supported,
- * then use that instead.
- */
- edev->pe_config_addr = eeh_ops->get_pe_addr(dn);
-
- /* Some older systems (Power4) allow the
- * ibm,set-eeh-option call to succeed even on nodes
- * where EEH is not supported. Verify support
- * explicitly.
- */
- ret = eeh_ops->get_state(dn, NULL);
- if (ret > 0 && ret != EEH_STATE_NOT_SUPPORT)
- enable = 1;
- }
-
- if (enable) {
- eeh_subsystem_enabled = 1;
- edev->mode |= EEH_MODE_SUPPORTED;
-
- pr_debug("EEH: %s: eeh enabled, config=%x pe_config=%x\n",
- dn->full_name, edev->config_addr,
- edev->pe_config_addr);
- } else {
-
- /* This device doesn't support EEH, but it may have an
- * EEH parent, in which case we mark it as supported.
- */
- if (dn->parent && of_node_to_eeh_dev(dn->parent) &&
- (of_node_to_eeh_dev(dn->parent)->mode & EEH_MODE_SUPPORTED)) {
- /* Parent supports EEH. */
- edev->mode |= EEH_MODE_SUPPORTED;
- edev->config_addr = of_node_to_eeh_dev(dn->parent)->config_addr;
- return NULL;
- }
- }
- } else {
- printk(KERN_WARNING "EEH: %s: unable to get reg property.\n",
- dn->full_name);
- }
-
- eeh_save_bars(edev);
- return NULL;
-}
-
-/**
* eeh_ops_register - Register platform dependent EEH operations
* @ops: platform dependent EEH operations
*
@@ -982,7 +674,7 @@ int __exit eeh_ops_unregister(const char *name)
* Even if force-off is set, the EEH hardware is still enabled, so that
* newer systems can boot.
*/
-void __init eeh_init(void)
+static int __init eeh_init(void)
{
struct pci_controller *hose, *tmp;
struct device_node *phb;
@@ -992,27 +684,34 @@ void __init eeh_init(void)
if (!eeh_ops) {
pr_warning("%s: Platform EEH operation not found\n",
__func__);
- return;
+ return -EEXIST;
} else if ((ret = eeh_ops->init())) {
pr_warning("%s: Failed to call platform init function (%d)\n",
__func__, ret);
- return;
+ return ret;
}
raw_spin_lock_init(&confirm_error_lock);
/* Enable EEH for all adapters */
- list_for_each_entry_safe(hose, tmp, &hose_list, list_node) {
- phb = hose->dn;
- traverse_pci_devices(phb, eeh_early_enable, NULL);
+ if (eeh_probe_mode_devtree()) {
+ list_for_each_entry_safe(hose, tmp,
+ &hose_list, list_node) {
+ phb = hose->dn;
+ traverse_pci_devices(phb, eeh_ops->of_probe, NULL);
+ }
}
if (eeh_subsystem_enabled)
- printk(KERN_INFO "EEH: PCI Enhanced I/O Error Handling Enabled\n");
+ pr_info("EEH: PCI Enhanced I/O Error Handling Enabled\n");
else
- printk(KERN_WARNING "EEH: No capable adapters found\n");
+ pr_warning("EEH: No capable adapters found\n");
+
+ return ret;
}
+core_initcall_sync(eeh_init);
+
/**
* eeh_add_device_early - Enable EEH for the indicated device_node
* @dn: device node for which to set up EEH
@@ -1029,7 +728,7 @@ static void eeh_add_device_early(struct device_node *dn)
{
struct pci_controller *phb;
- if (!dn || !of_node_to_eeh_dev(dn))
+ if (!of_node_to_eeh_dev(dn))
return;
phb = of_node_to_eeh_dev(dn)->phb;
@@ -1037,7 +736,8 @@ static void eeh_add_device_early(struct device_node *dn)
if (NULL == phb || 0 == phb->buid)
return;
- eeh_early_enable(dn, NULL);
+ /* FIXME: hotplug support on POWERNV */
+ eeh_ops->of_probe(dn, NULL);
}
/**
@@ -1087,7 +787,7 @@ static void eeh_add_device_late(struct pci_dev *dev)
edev->pdev = dev;
dev->dev.archdata.edev = edev;
- pci_addr_cache_insert_device(dev);
+ eeh_addr_cache_insert_dev(dev);
eeh_sysfs_add_device(dev);
}
@@ -1117,6 +817,7 @@ EXPORT_SYMBOL_GPL(eeh_add_device_tree_late);
/**
* eeh_remove_device - Undo EEH setup for the indicated pci device
* @dev: pci device to be removed
+ * @purge_pe: remove the PE or not
*
* This routine should be called when a device is removed from
* a running system (e.g. by hotplug or dlpar). It unregisters
@@ -1124,7 +825,7 @@ EXPORT_SYMBOL_GPL(eeh_add_device_tree_late);
* this device will no longer be detected after this call; thus,
* i/o errors affecting this slot may leave this device unusable.
*/
-static void eeh_remove_device(struct pci_dev *dev)
+static void eeh_remove_device(struct pci_dev *dev, int purge_pe)
{
struct eeh_dev *edev;
@@ -1143,28 +844,30 @@ static void eeh_remove_device(struct pci_dev *dev)
dev->dev.archdata.edev = NULL;
pci_dev_put(dev);
- pci_addr_cache_remove_device(dev);
+ eeh_rmv_from_parent_pe(edev, purge_pe);
+ eeh_addr_cache_rmv_dev(dev);
eeh_sysfs_remove_device(dev);
}
/**
* eeh_remove_bus_device - Undo EEH setup for the indicated PCI device
* @dev: PCI device
+ * @purge_pe: remove the corresponding PE or not
*
* This routine must be called when a device is removed from the
* running system through hotplug or dlpar. The corresponding
* PCI address cache will be removed.
*/
-void eeh_remove_bus_device(struct pci_dev *dev)
+void eeh_remove_bus_device(struct pci_dev *dev, int purge_pe)
{
struct pci_bus *bus = dev->subordinate;
struct pci_dev *child, *tmp;
- eeh_remove_device(dev);
+ eeh_remove_device(dev, purge_pe);
if (bus && dev->hdr_type == PCI_HEADER_TYPE_BRIDGE) {
list_for_each_entry_safe(child, tmp, &bus->devices, bus_list)
- eeh_remove_bus_device(child);
+ eeh_remove_bus_device(child, purge_pe);
}
}
EXPORT_SYMBOL_GPL(eeh_remove_bus_device);
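The hunks above move the failure check from a (device_node, pci_dev) pair to the EEH device itself. A rough, hypothetical sketch of how a caller might use the converted API after this patch (foo_read_reg() is invented for illustration; pci_dev_to_eeh_dev() and eeh_dev_check_failure() are the helpers shown in the hunks above):

	/* Sketch only: treat an all-ones read as a possible EEH freeze */
	static u32 foo_read_reg(struct pci_dev *pdev, void __iomem *regs, int off)
	{
		u32 val = ioread32(regs + off);

		if (val == 0xffffffff) {
			struct eeh_dev *edev = pci_dev_to_eeh_dev(pdev);

			if (edev)
				eeh_dev_check_failure(edev);
		}

		return val;
	}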
diff --git a/arch/powerpc/platforms/pseries/eeh_cache.c b/arch/powerpc/platforms/pseries/eeh_cache.c
index e5ae1c687c66..5a4c87903057 100644
--- a/arch/powerpc/platforms/pseries/eeh_cache.c
+++ b/arch/powerpc/platforms/pseries/eeh_cache.c
@@ -50,6 +50,7 @@ struct pci_io_addr_range {
struct rb_node rb_node;
unsigned long addr_lo;
unsigned long addr_hi;
+ struct eeh_dev *edev;
struct pci_dev *pcidev;
unsigned int flags;
};
@@ -59,7 +60,7 @@ static struct pci_io_addr_cache {
spinlock_t piar_lock;
} pci_io_addr_cache_root;
-static inline struct pci_dev *__pci_addr_cache_get_device(unsigned long addr)
+static inline struct eeh_dev *__eeh_addr_cache_get_device(unsigned long addr)
{
struct rb_node *n = pci_io_addr_cache_root.rb_root.rb_node;
@@ -74,7 +75,7 @@ static inline struct pci_dev *__pci_addr_cache_get_device(unsigned long addr)
n = n->rb_right;
} else {
pci_dev_get(piar->pcidev);
- return piar->pcidev;
+ return piar->edev;
}
}
}
@@ -83,7 +84,7 @@ static inline struct pci_dev *__pci_addr_cache_get_device(unsigned long addr)
}
/**
- * pci_addr_cache_get_device - Get device, given only address
+ * eeh_addr_cache_get_dev - Get device, given only address
* @addr: mmio (PIO) phys address or i/o port number
*
* Given an mmio phys address, or a port number, find a pci device
@@ -92,15 +93,15 @@ static inline struct pci_dev *__pci_addr_cache_get_device(unsigned long addr)
* from zero (that is, they do *not* have pci_io_addr added in).
* It is safe to call this function within an interrupt.
*/
-struct pci_dev *pci_addr_cache_get_device(unsigned long addr)
+struct eeh_dev *eeh_addr_cache_get_dev(unsigned long addr)
{
- struct pci_dev *dev;
+ struct eeh_dev *edev;
unsigned long flags;
spin_lock_irqsave(&pci_io_addr_cache_root.piar_lock, flags);
- dev = __pci_addr_cache_get_device(addr);
+ edev = __eeh_addr_cache_get_device(addr);
spin_unlock_irqrestore(&pci_io_addr_cache_root.piar_lock, flags);
- return dev;
+ return edev;
}
#ifdef DEBUG
@@ -108,7 +109,7 @@ struct pci_dev *pci_addr_cache_get_device(unsigned long addr)
* Handy-dandy debug print routine, does nothing more
* than print out the contents of our addr cache.
*/
-static void pci_addr_cache_print(struct pci_io_addr_cache *cache)
+static void eeh_addr_cache_print(struct pci_io_addr_cache *cache)
{
struct rb_node *n;
int cnt = 0;
@@ -117,7 +118,7 @@ static void pci_addr_cache_print(struct pci_io_addr_cache *cache)
while (n) {
struct pci_io_addr_range *piar;
piar = rb_entry(n, struct pci_io_addr_range, rb_node);
- printk(KERN_DEBUG "PCI: %s addr range %d [%lx-%lx]: %s\n",
+ pr_debug("PCI: %s addr range %d [%lx-%lx]: %s\n",
(piar->flags & IORESOURCE_IO) ? "i/o" : "mem", cnt,
piar->addr_lo, piar->addr_hi, pci_name(piar->pcidev));
cnt++;
@@ -128,7 +129,7 @@ static void pci_addr_cache_print(struct pci_io_addr_cache *cache)
/* Insert address range into the rb tree. */
static struct pci_io_addr_range *
-pci_addr_cache_insert(struct pci_dev *dev, unsigned long alo,
+eeh_addr_cache_insert(struct pci_dev *dev, unsigned long alo,
unsigned long ahi, unsigned int flags)
{
struct rb_node **p = &pci_io_addr_cache_root.rb_root.rb_node;
@@ -146,23 +147,24 @@ pci_addr_cache_insert(struct pci_dev *dev, unsigned long alo,
} else {
if (dev != piar->pcidev ||
alo != piar->addr_lo || ahi != piar->addr_hi) {
- printk(KERN_WARNING "PIAR: overlapping address range\n");
+ pr_warning("PIAR: overlapping address range\n");
}
return piar;
}
}
- piar = kmalloc(sizeof(struct pci_io_addr_range), GFP_ATOMIC);
+ piar = kzalloc(sizeof(struct pci_io_addr_range), GFP_ATOMIC);
if (!piar)
return NULL;
pci_dev_get(dev);
piar->addr_lo = alo;
piar->addr_hi = ahi;
+ piar->edev = pci_dev_to_eeh_dev(dev);
piar->pcidev = dev;
piar->flags = flags;
#ifdef DEBUG
- printk(KERN_DEBUG "PIAR: insert range=[%lx:%lx] dev=%s\n",
+ pr_debug("PIAR: insert range=[%lx:%lx] dev=%s\n",
alo, ahi, pci_name(dev));
#endif
@@ -172,7 +174,7 @@ pci_addr_cache_insert(struct pci_dev *dev, unsigned long alo,
return piar;
}
-static void __pci_addr_cache_insert_device(struct pci_dev *dev)
+static void __eeh_addr_cache_insert_dev(struct pci_dev *dev)
{
struct device_node *dn;
struct eeh_dev *edev;
@@ -180,7 +182,7 @@ static void __pci_addr_cache_insert_device(struct pci_dev *dev)
dn = pci_device_to_OF_node(dev);
if (!dn) {
- printk(KERN_WARNING "PCI: no pci dn found for dev=%s\n", pci_name(dev));
+ pr_warning("PCI: no pci dn found for dev=%s\n", pci_name(dev));
return;
}
@@ -192,8 +194,7 @@ static void __pci_addr_cache_insert_device(struct pci_dev *dev)
}
/* Skip any devices for which EEH is not enabled. */
- if (!(edev->mode & EEH_MODE_SUPPORTED) ||
- edev->mode & EEH_MODE_NOCHECK) {
+ if (!edev->pe) {
#ifdef DEBUG
pr_info("PCI: skip building address cache for=%s - %s\n",
pci_name(dev), dn->full_name);
@@ -212,19 +213,19 @@ static void __pci_addr_cache_insert_device(struct pci_dev *dev)
continue;
if (start == 0 || ~start == 0 || end == 0 || ~end == 0)
continue;
- pci_addr_cache_insert(dev, start, end, flags);
+ eeh_addr_cache_insert(dev, start, end, flags);
}
}
/**
- * pci_addr_cache_insert_device - Add a device to the address cache
+ * eeh_addr_cache_insert_dev - Add a device to the address cache
* @dev: PCI device whose I/O addresses we are interested in.
*
* In order to support the fast lookup of devices based on addresses,
* we maintain a cache of devices that can be quickly searched.
* This routine adds a device to that cache.
*/
-void pci_addr_cache_insert_device(struct pci_dev *dev)
+void eeh_addr_cache_insert_dev(struct pci_dev *dev)
{
unsigned long flags;
@@ -233,11 +234,11 @@ void pci_addr_cache_insert_device(struct pci_dev *dev)
return;
spin_lock_irqsave(&pci_io_addr_cache_root.piar_lock, flags);
- __pci_addr_cache_insert_device(dev);
+ __eeh_addr_cache_insert_dev(dev);
spin_unlock_irqrestore(&pci_io_addr_cache_root.piar_lock, flags);
}
-static inline void __pci_addr_cache_remove_device(struct pci_dev *dev)
+static inline void __eeh_addr_cache_rmv_dev(struct pci_dev *dev)
{
struct rb_node *n;
@@ -258,7 +259,7 @@ restart:
}
/**
- * pci_addr_cache_remove_device - remove pci device from addr cache
+ * eeh_addr_cache_rmv_dev - remove pci device from addr cache
* @dev: device to remove
*
* Remove a device from the addr-cache tree.
@@ -266,17 +267,17 @@ restart:
* the tree multiple times (once per resource).
* But so what; device removal doesn't need to be that fast.
*/
-void pci_addr_cache_remove_device(struct pci_dev *dev)
+void eeh_addr_cache_rmv_dev(struct pci_dev *dev)
{
unsigned long flags;
spin_lock_irqsave(&pci_io_addr_cache_root.piar_lock, flags);
- __pci_addr_cache_remove_device(dev);
+ __eeh_addr_cache_rmv_dev(dev);
spin_unlock_irqrestore(&pci_io_addr_cache_root.piar_lock, flags);
}
/**
- * pci_addr_cache_build - Build a cache of I/O addresses
+ * eeh_addr_cache_build - Build a cache of I/O addresses
*
* Build a cache of pci i/o addresses. This cache will be used to
* find the pci device that corresponds to a given address.
@@ -284,7 +285,7 @@ void pci_addr_cache_remove_device(struct pci_dev *dev)
* Must be run late in boot process, after the pci controllers
* have been scanned for devices (after all device resources are known).
*/
-void __init pci_addr_cache_build(void)
+void __init eeh_addr_cache_build(void)
{
struct device_node *dn;
struct eeh_dev *edev;
@@ -293,7 +294,7 @@ void __init pci_addr_cache_build(void)
spin_lock_init(&pci_io_addr_cache_root.piar_lock);
for_each_pci_dev(dev) {
- pci_addr_cache_insert_device(dev);
+ eeh_addr_cache_insert_dev(dev);
dn = pci_device_to_OF_node(dev);
if (!dn)
@@ -312,7 +313,7 @@ void __init pci_addr_cache_build(void)
#ifdef DEBUG
/* Verify tree built up above, echo back the list of addrs. */
- pci_addr_cache_print(&pci_io_addr_cache_root);
+ eeh_addr_cache_print(&pci_io_addr_cache_root);
#endif
}
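Only part of the insertion path is visible in the hunks above; the per-BAR loop feeding eeh_addr_cache_insert() looks roughly like the sketch below (reconstructed around the visible context lines, so anything outside them is an assumption):

	for (i = 0; i < PCI_NUM_RESOURCES; i++) {
		unsigned long start = pci_resource_start(dev, i);
		unsigned long end = pci_resource_end(dev, i);
		unsigned int flags = pci_resource_flags(dev, i);

		/* Only bus addresses are interesting, not DMA or other stuff */
		if (!(flags & (IORESOURCE_IO | IORESOURCE_MEM)))
			continue;
		if (start == 0 || ~start == 0 || end == 0 || ~end == 0)
			continue;
		/* Record [start, end] plus the owning eeh_dev for rb-tree lookup */
		eeh_addr_cache_insert(dev, start, end, flags);
	}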
diff --git a/arch/powerpc/platforms/pseries/eeh_dev.c b/arch/powerpc/platforms/pseries/eeh_dev.c
index c4507d095900..66442341d3a6 100644
--- a/arch/powerpc/platforms/pseries/eeh_dev.c
+++ b/arch/powerpc/platforms/pseries/eeh_dev.c
@@ -55,7 +55,7 @@ void * __devinit eeh_dev_init(struct device_node *dn, void *data)
struct eeh_dev *edev;
/* Allocate EEH device */
- edev = zalloc_maybe_bootmem(sizeof(*edev), GFP_KERNEL);
+ edev = kzalloc(sizeof(*edev), GFP_KERNEL);
if (!edev) {
pr_warning("%s: out of memory\n", __func__);
return NULL;
@@ -65,6 +65,7 @@ void * __devinit eeh_dev_init(struct device_node *dn, void *data)
PCI_DN(dn)->edev = edev;
edev->dn = dn;
edev->phb = phb;
+ INIT_LIST_HEAD(&edev->list);
return NULL;
}
@@ -80,6 +81,9 @@ void __devinit eeh_dev_phb_init_dynamic(struct pci_controller *phb)
{
struct device_node *dn = phb->dn;
+ /* EEH PE for PHB */
+ eeh_phb_pe_create(phb);
+
/* EEH device for PHB */
eeh_dev_init(dn, phb);
@@ -93,10 +97,16 @@ void __devinit eeh_dev_phb_init_dynamic(struct pci_controller *phb)
* Scan all the existing PHBs and create EEH devices for their OF
* nodes and their children OF nodes
*/
-void __init eeh_dev_phb_init(void)
+static int __init eeh_dev_phb_init(void)
{
struct pci_controller *phb, *tmp;
list_for_each_entry_safe(phb, tmp, &hose_list, list_node)
eeh_dev_phb_init_dynamic(phb);
+
+ pr_info("EEH: devices created\n");
+
+ return 0;
}
+
+core_initcall(eeh_dev_phb_init);
diff --git a/arch/powerpc/platforms/pseries/eeh_driver.c b/arch/powerpc/platforms/pseries/eeh_driver.c
index baf92cd9dfab..a3fefb61097c 100644
--- a/arch/powerpc/platforms/pseries/eeh_driver.c
+++ b/arch/powerpc/platforms/pseries/eeh_driver.c
@@ -25,6 +25,7 @@
#include <linux/delay.h>
#include <linux/interrupt.h>
#include <linux/irq.h>
+#include <linux/module.h>
#include <linux/pci.h>
#include <asm/eeh.h>
#include <asm/eeh_event.h>
@@ -47,6 +48,41 @@ static inline const char *eeh_pcid_name(struct pci_dev *pdev)
return "";
}
+/**
+ * eeh_pcid_get - Get the PCI device driver
+ * @pdev: PCI device
+ *
+ * The function is used to retrieve the PCI device driver for
+ * the indicated PCI device. It also takes a reference on the
+ * driver's module to prevent it from being unloaded while the
+ * driver is in use; otherwise the kernel could crash.
+ */
+static inline struct pci_driver *eeh_pcid_get(struct pci_dev *pdev)
+{
+ if (!pdev || !pdev->driver)
+ return NULL;
+
+ if (!try_module_get(pdev->driver->driver.owner))
+ return NULL;
+
+ return pdev->driver;
+}
+
+/**
+ * eeh_pcid_put - Dereference on the PCI device driver
+ * @pdev: PCI device
+ *
+ * The function drops the reference taken on the PCI device
+ * driver of the indicated PCI device.
+ */
+static inline void eeh_pcid_put(struct pci_dev *pdev)
+{
+ if (!pdev || !pdev->driver)
+ return;
+
+ module_put(pdev->driver->driver.owner);
+}
+
#if 0
static void print_device_node_tree(struct pci_dn *pdn, int dent)
{
@@ -93,7 +129,7 @@ static void eeh_disable_irq(struct pci_dev *dev)
if (!irq_has_action(dev->irq))
return;
- edev->mode |= EEH_MODE_IRQ_DISABLED;
+ edev->mode |= EEH_DEV_IRQ_DISABLED;
disable_irq_nosync(dev->irq);
}
@@ -108,36 +144,44 @@ static void eeh_enable_irq(struct pci_dev *dev)
{
struct eeh_dev *edev = pci_dev_to_eeh_dev(dev);
- if ((edev->mode) & EEH_MODE_IRQ_DISABLED) {
- edev->mode &= ~EEH_MODE_IRQ_DISABLED;
+ if ((edev->mode) & EEH_DEV_IRQ_DISABLED) {
+ edev->mode &= ~EEH_DEV_IRQ_DISABLED;
enable_irq(dev->irq);
}
}
/**
* eeh_report_error - Report pci error to each device driver
- * @dev: PCI device
+ * @data: eeh device
* @userdata: return value
*
* Report an EEH error to each device driver, collect up and
* merge the device driver responses. Cumulative response
* passed back in "userdata".
*/
-static int eeh_report_error(struct pci_dev *dev, void *userdata)
+static void *eeh_report_error(void *data, void *userdata)
{
+ struct eeh_dev *edev = (struct eeh_dev *)data;
+ struct pci_dev *dev = eeh_dev_to_pci_dev(edev);
enum pci_ers_result rc, *res = userdata;
- struct pci_driver *driver = dev->driver;
+ struct pci_driver *driver;
+ /* We might not have the associated PCI device;
+ * if so, simply move on to the next one.
+ */
+ if (!dev) return NULL;
dev->error_state = pci_channel_io_frozen;
- if (!driver)
- return 0;
+ driver = eeh_pcid_get(dev);
+ if (!driver) return NULL;
eeh_disable_irq(dev);
if (!driver->err_handler ||
- !driver->err_handler->error_detected)
- return 0;
+ !driver->err_handler->error_detected) {
+ eeh_pcid_put(dev);
+ return NULL;
+ }
rc = driver->err_handler->error_detected(dev, pci_channel_io_frozen);
@@ -145,27 +189,34 @@ static int eeh_report_error(struct pci_dev *dev, void *userdata)
if (rc == PCI_ERS_RESULT_NEED_RESET) *res = rc;
if (*res == PCI_ERS_RESULT_NONE) *res = rc;
- return 0;
+ eeh_pcid_put(dev);
+ return NULL;
}
/**
* eeh_report_mmio_enabled - Tell drivers that MMIO has been enabled
- * @dev: PCI device
+ * @data: eeh device
* @userdata: return value
*
* Tells each device driver that IO ports, MMIO and config space I/O
* are now enabled. Collects up and merges the device driver responses.
* Cumulative response passed back in "userdata".
*/
-static int eeh_report_mmio_enabled(struct pci_dev *dev, void *userdata)
+static void *eeh_report_mmio_enabled(void *data, void *userdata)
{
+ struct eeh_dev *edev = (struct eeh_dev *)data;
+ struct pci_dev *dev = eeh_dev_to_pci_dev(edev);
enum pci_ers_result rc, *res = userdata;
- struct pci_driver *driver = dev->driver;
+ struct pci_driver *driver;
- if (!driver ||
- !driver->err_handler ||
- !driver->err_handler->mmio_enabled)
- return 0;
+ driver = eeh_pcid_get(dev);
+ if (!driver) return NULL;
+
+ if (!driver->err_handler ||
+ !driver->err_handler->mmio_enabled) {
+ eeh_pcid_put(dev);
+ return NULL;
+ }
rc = driver->err_handler->mmio_enabled(dev);
@@ -173,12 +224,13 @@ static int eeh_report_mmio_enabled(struct pci_dev *dev, void *userdata)
if (rc == PCI_ERS_RESULT_NEED_RESET) *res = rc;
if (*res == PCI_ERS_RESULT_NONE) *res = rc;
- return 0;
+ eeh_pcid_put(dev);
+ return NULL;
}
/**
* eeh_report_reset - Tell device that slot has been reset
- * @dev: PCI device
+ * @data: eeh device
* @userdata: return value
*
* This routine must be called while EEH tries to reset particular
@@ -186,21 +238,26 @@ static int eeh_report_mmio_enabled(struct pci_dev *dev, void *userdata)
* some actions, usually to save data the driver needs so that the
* driver can work again while the device is recovered.
*/
-static int eeh_report_reset(struct pci_dev *dev, void *userdata)
+static void *eeh_report_reset(void *data, void *userdata)
{
+ struct eeh_dev *edev = (struct eeh_dev *)data;
+ struct pci_dev *dev = eeh_dev_to_pci_dev(edev);
enum pci_ers_result rc, *res = userdata;
- struct pci_driver *driver = dev->driver;
-
- if (!driver)
- return 0;
+ struct pci_driver *driver;
+ if (!dev) return NULL;
dev->error_state = pci_channel_io_normal;
+ driver = eeh_pcid_get(dev);
+ if (!driver) return NULL;
+
eeh_enable_irq(dev);
if (!driver->err_handler ||
- !driver->err_handler->slot_reset)
- return 0;
+ !driver->err_handler->slot_reset) {
+ eeh_pcid_put(dev);
+ return NULL;
+ }
rc = driver->err_handler->slot_reset(dev);
if ((*res == PCI_ERS_RESULT_NONE) ||
@@ -208,109 +265,115 @@ static int eeh_report_reset(struct pci_dev *dev, void *userdata)
if (*res == PCI_ERS_RESULT_DISCONNECT &&
rc == PCI_ERS_RESULT_NEED_RESET) *res = rc;
- return 0;
+ eeh_pcid_put(dev);
+ return NULL;
}
/**
* eeh_report_resume - Tell device to resume normal operations
- * @dev: PCI device
+ * @data: eeh device
* @userdata: return value
*
* This routine must be called to notify the device driver that it
* could resume so that the device driver can do some initialization
* to make the recovered device work again.
*/
-static int eeh_report_resume(struct pci_dev *dev, void *userdata)
+static void *eeh_report_resume(void *data, void *userdata)
{
- struct pci_driver *driver = dev->driver;
+ struct eeh_dev *edev = (struct eeh_dev *)data;
+ struct pci_dev *dev = eeh_dev_to_pci_dev(edev);
+ struct pci_driver *driver;
+ if (!dev) return NULL;
dev->error_state = pci_channel_io_normal;
- if (!driver)
- return 0;
+ driver = eeh_pcid_get(dev);
+ if (!driver) return NULL;
eeh_enable_irq(dev);
if (!driver->err_handler ||
- !driver->err_handler->resume)
- return 0;
+ !driver->err_handler->resume) {
+ eeh_pcid_put(dev);
+ return NULL;
+ }
driver->err_handler->resume(dev);
- return 0;
+ eeh_pcid_put(dev);
+ return NULL;
}
/**
* eeh_report_failure - Tell device driver that device is dead.
- * @dev: PCI device
+ * @data: eeh device
* @userdata: return value
*
* This informs the device driver that the device is permanently
* dead, and that no further recovery attempts will be made on it.
*/
-static int eeh_report_failure(struct pci_dev *dev, void *userdata)
+static void *eeh_report_failure(void *data, void *userdata)
{
- struct pci_driver *driver = dev->driver;
+ struct eeh_dev *edev = (struct eeh_dev *)data;
+ struct pci_dev *dev = eeh_dev_to_pci_dev(edev);
+ struct pci_driver *driver;
+ if (!dev) return NULL;
dev->error_state = pci_channel_io_perm_failure;
- if (!driver)
- return 0;
+ driver = eeh_pcid_get(dev);
+ if (!driver) return NULL;
eeh_disable_irq(dev);
if (!driver->err_handler ||
- !driver->err_handler->error_detected)
- return 0;
+ !driver->err_handler->error_detected) {
+ eeh_pcid_put(dev);
+ return NULL;
+ }
driver->err_handler->error_detected(dev, pci_channel_io_perm_failure);
- return 0;
+ eeh_pcid_put(dev);
+ return NULL;
}
/**
* eeh_reset_device - Perform actual reset of a pci slot
- * @edev: PE associated EEH device
+ * @pe: EEH PE
 * @bus: PCI bus corresponding to the isolated slot
*
* This routine must be called to do reset on the indicated PE.
* During the reset, udev might be invoked because those affected
* PCI devices will be removed and then added.
*/
-static int eeh_reset_device(struct eeh_dev *edev, struct pci_bus *bus)
+static int eeh_reset_device(struct eeh_pe *pe, struct pci_bus *bus)
{
- struct device_node *dn;
int cnt, rc;
/* pcibios will clear the counter; save the value */
- cnt = edev->freeze_count;
+ cnt = pe->freeze_count;
+ /*
+ * We don't remove the corresponding PE instances because
+ * we need the information afterwards. The EEH devices are
+ * expected to be attached again shortly, when
+ * pcibios_add_pci_devices() is called.
+ */
if (bus)
- pcibios_remove_pci_devices(bus);
+ __pcibios_remove_pci_devices(bus, 0);
/* Reset the pci controller. (Asserts RST#; resets config space).
* Reconfigure bridges and devices. Don't try to bring the system
* up if the reset failed for some reason.
*/
- rc = eeh_reset_pe(edev);
+ rc = eeh_reset_pe(pe);
if (rc)
return rc;
- /* Walk over all functions on this device. */
- dn = eeh_dev_to_of_node(edev);
- if (!pcibios_find_pci_bus(dn) && of_node_to_eeh_dev(dn->parent))
- dn = dn->parent->child;
-
- while (dn) {
- struct eeh_dev *pedev = of_node_to_eeh_dev(dn);
-
- /* On Power4, always true because eeh_pe_config_addr=0 */
- if (edev->pe_config_addr == pedev->pe_config_addr) {
- eeh_ops->configure_bridge(dn);
- eeh_restore_bars(pedev);
- }
- dn = dn->sibling;
- }
+ /* Restore PE */
+ eeh_ops->configure_bridge(pe);
+ eeh_pe_restore_bars(pe);
/* Give the system 5 seconds to finish running the user-space
* hotplug shutdown scripts, e.g. ifdown for ethernet. Yes,
@@ -322,7 +385,7 @@ static int eeh_reset_device(struct eeh_dev *edev, struct pci_bus *bus)
ssleep(5);
pcibios_add_pci_devices(bus);
}
- edev->freeze_count = cnt;
+ pe->freeze_count = cnt;
return 0;
}
@@ -334,7 +397,7 @@ static int eeh_reset_device(struct eeh_dev *edev, struct pci_bus *bus)
/**
* eeh_handle_event - Reset a PCI device after hard lockup.
- * @event: EEH event
+ * @pe: EEH PE
*
 * When the PHB detects address or data parity errors on a particular
 * PCI slot, the associated PE will be frozen. Besides, DMAs occurring
@@ -349,69 +412,24 @@ static int eeh_reset_device(struct eeh_dev *edev, struct pci_bus *bus)
* drivers (which cause a second set of hotplug events to go out to
* userspace).
*/
-struct eeh_dev *handle_eeh_events(struct eeh_event *event)
+void eeh_handle_event(struct eeh_pe *pe)
{
- struct device_node *frozen_dn;
- struct eeh_dev *frozen_edev;
struct pci_bus *frozen_bus;
int rc = 0;
enum pci_ers_result result = PCI_ERS_RESULT_NONE;
- const char *location, *pci_str, *drv_str, *bus_pci_str, *bus_drv_str;
-
- frozen_dn = eeh_find_device_pe(eeh_dev_to_of_node(event->edev));
- if (!frozen_dn) {
- location = of_get_property(eeh_dev_to_of_node(event->edev), "ibm,loc-code", NULL);
- location = location ? location : "unknown";
- printk(KERN_ERR "EEH: Error: Cannot find partition endpoint "
- "for location=%s pci addr=%s\n",
- location, eeh_pci_name(eeh_dev_to_pci_dev(event->edev)));
- return NULL;
- }
-
- frozen_bus = pcibios_find_pci_bus(frozen_dn);
- location = of_get_property(frozen_dn, "ibm,loc-code", NULL);
- location = location ? location : "unknown";
-
- /* There are two different styles for coming up with the PE.
- * In the old style, it was the highest EEH-capable device
- * which was always an EADS pci bridge. In the new style,
- * there might not be any EADS bridges, and even when there are,
- * the firmware marks them as "EEH incapable". So another
- * two-step is needed to find the pci bus..
- */
- if (!frozen_bus)
- frozen_bus = pcibios_find_pci_bus(frozen_dn->parent);
+ frozen_bus = eeh_pe_bus_get(pe);
if (!frozen_bus) {
- printk(KERN_ERR "EEH: Cannot find PCI bus "
- "for location=%s dn=%s\n",
- location, frozen_dn->full_name);
- return NULL;
+ pr_err("%s: Cannot find PCI bus for PHB#%d-PE#%x\n",
+ __func__, pe->phb->global_number, pe->addr);
+ return;
}
- frozen_edev = of_node_to_eeh_dev(frozen_dn);
- frozen_edev->freeze_count++;
- pci_str = eeh_pci_name(eeh_dev_to_pci_dev(event->edev));
- drv_str = eeh_pcid_name(eeh_dev_to_pci_dev(event->edev));
-
- if (frozen_edev->freeze_count > EEH_MAX_ALLOWED_FREEZES)
+ pe->freeze_count++;
+ if (pe->freeze_count > EEH_MAX_ALLOWED_FREEZES)
goto excess_failures;
-
- printk(KERN_WARNING
- "EEH: This PCI device has failed %d times in the last hour:\n",
- frozen_edev->freeze_count);
-
- if (frozen_edev->pdev) {
- bus_pci_str = pci_name(frozen_edev->pdev);
- bus_drv_str = eeh_pcid_name(frozen_edev->pdev);
- printk(KERN_WARNING
- "EEH: Bus location=%s driver=%s pci addr=%s\n",
- location, bus_drv_str, bus_pci_str);
- }
-
- printk(KERN_WARNING
- "EEH: Device location=%s driver=%s pci addr=%s\n",
- location, drv_str, pci_str);
+ pr_warning("EEH: This PCI device has failed %d times in the last hour\n",
+ pe->freeze_count);
/* Walk the various device drivers attached to this slot through
* a reset sequence, giving each an opportunity to do what it needs
@@ -419,12 +437,12 @@ struct eeh_dev *handle_eeh_events(struct eeh_event *event)
* status ... if any child can't handle the reset, then the entire
* slot is dlpar removed and added.
*/
- pci_walk_bus(frozen_bus, eeh_report_error, &result);
+ eeh_pe_dev_traverse(pe, eeh_report_error, &result);
/* Get the current PCI slot state. This can take a long time,
* sometimes over 3 seconds for certain systems.
*/
- rc = eeh_ops->wait_state(eeh_dev_to_of_node(frozen_edev), MAX_WAIT_FOR_RECOVERY*1000);
+ rc = eeh_ops->wait_state(pe, MAX_WAIT_FOR_RECOVERY*1000);
if (rc < 0 || rc == EEH_STATE_NOT_SUPPORT) {
printk(KERN_WARNING "EEH: Permanent failure\n");
goto hard_fail;
@@ -434,14 +452,14 @@ struct eeh_dev *handle_eeh_events(struct eeh_event *event)
* don't post the error log until after all dev drivers
* have been informed.
*/
- eeh_slot_error_detail(frozen_edev, EEH_LOG_TEMP);
+ eeh_slot_error_detail(pe, EEH_LOG_TEMP);
/* If all device drivers were EEH-unaware, then shut
* down all of the device drivers, and hope they
* go down willingly, without panicing the system.
*/
if (result == PCI_ERS_RESULT_NONE) {
- rc = eeh_reset_device(frozen_edev, frozen_bus);
+ rc = eeh_reset_device(pe, frozen_bus);
if (rc) {
printk(KERN_WARNING "EEH: Unable to reset, rc=%d\n", rc);
goto hard_fail;
@@ -450,7 +468,7 @@ struct eeh_dev *handle_eeh_events(struct eeh_event *event)
/* If all devices reported they can proceed, then re-enable MMIO */
if (result == PCI_ERS_RESULT_CAN_RECOVER) {
- rc = eeh_pci_enable(frozen_edev, EEH_OPT_THAW_MMIO);
+ rc = eeh_pci_enable(pe, EEH_OPT_THAW_MMIO);
if (rc < 0)
goto hard_fail;
@@ -458,13 +476,13 @@ struct eeh_dev *handle_eeh_events(struct eeh_event *event)
result = PCI_ERS_RESULT_NEED_RESET;
} else {
result = PCI_ERS_RESULT_NONE;
- pci_walk_bus(frozen_bus, eeh_report_mmio_enabled, &result);
+ eeh_pe_dev_traverse(pe, eeh_report_mmio_enabled, &result);
}
}
/* If all devices reported they can proceed, then re-enable DMA */
if (result == PCI_ERS_RESULT_CAN_RECOVER) {
- rc = eeh_pci_enable(frozen_edev, EEH_OPT_THAW_DMA);
+ rc = eeh_pci_enable(pe, EEH_OPT_THAW_DMA);
if (rc < 0)
goto hard_fail;
@@ -482,13 +500,13 @@ struct eeh_dev *handle_eeh_events(struct eeh_event *event)
/* If any device called out for a reset, then reset the slot */
if (result == PCI_ERS_RESULT_NEED_RESET) {
- rc = eeh_reset_device(frozen_edev, NULL);
+ rc = eeh_reset_device(pe, NULL);
if (rc) {
printk(KERN_WARNING "EEH: Cannot reset, rc=%d\n", rc);
goto hard_fail;
}
result = PCI_ERS_RESULT_NONE;
- pci_walk_bus(frozen_bus, eeh_report_reset, &result);
+ eeh_pe_dev_traverse(pe, eeh_report_reset, &result);
}
/* All devices should claim they have recovered by now. */
@@ -499,9 +517,9 @@ struct eeh_dev *handle_eeh_events(struct eeh_event *event)
}
/* Tell all device drivers that they can resume operations */
- pci_walk_bus(frozen_bus, eeh_report_resume, NULL);
+ eeh_pe_dev_traverse(pe, eeh_report_resume, NULL);
- return frozen_edev;
+ return;
excess_failures:
/*
@@ -509,30 +527,26 @@ excess_failures:
* are due to poorly seated PCI cards. Only 10% or so are
* due to actual, failed cards.
*/
- printk(KERN_ERR
- "EEH: PCI device at location=%s driver=%s pci addr=%s\n"
- "has failed %d times in the last hour "
- "and has been permanently disabled.\n"
- "Please try reseating this device or replacing it.\n",
- location, drv_str, pci_str, frozen_edev->freeze_count);
+ pr_err("EEH: PHB#%d-PE#%x has failed %d times in the\n"
+ "last hour and has been permanently disabled.\n"
+ "Please try reseating or replacing it.\n",
+ pe->phb->global_number, pe->addr,
+ pe->freeze_count);
goto perm_error;
hard_fail:
- printk(KERN_ERR
- "EEH: Unable to recover from failure of PCI device "
- "at location=%s driver=%s pci addr=%s\n"
- "Please try reseating this device or replacing it.\n",
- location, drv_str, pci_str);
+ pr_err("EEH: Unable to recover from failure from PHB#%d-PE#%x.\n"
+ "Please try reseating or replacing it\n",
+ pe->phb->global_number, pe->addr);
perm_error:
- eeh_slot_error_detail(frozen_edev, EEH_LOG_PERM);
+ eeh_slot_error_detail(pe, EEH_LOG_PERM);
/* Notify all devices that they're about to go down. */
- pci_walk_bus(frozen_bus, eeh_report_failure, NULL);
+ eeh_pe_dev_traverse(pe, eeh_report_failure, NULL);
/* Shut down the device drivers for good. */
- pcibios_remove_pci_devices(frozen_bus);
-
- return NULL;
+ if (frozen_bus)
+ pcibios_remove_pci_devices(frozen_bus);
}
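The eeh_report_*() handlers above now follow the callback convention expected by eeh_pe_dev_traverse(): the visited eeh_dev arrives as an opaque pointer, the second argument is caller context, and a non-NULL return value stops the walk. A minimal sketch of a callback in that style (the counting example is invented for illustration):

	static void *eeh_count_dev(void *data, void *flag)
	{
		int *cnt = flag;	/* caller-supplied context */

		(*cnt)++;		/* 'data' is the struct eeh_dev being visited */
		return NULL;		/* NULL keeps the traversal going */
	}

	/* Usage: int n = 0; eeh_pe_dev_traverse(pe, eeh_count_dev, &n); */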
diff --git a/arch/powerpc/platforms/pseries/eeh_event.c b/arch/powerpc/platforms/pseries/eeh_event.c
index fb506317ebb0..51faaac8abe6 100644
--- a/arch/powerpc/platforms/pseries/eeh_event.c
+++ b/arch/powerpc/platforms/pseries/eeh_event.c
@@ -57,7 +57,7 @@ static int eeh_event_handler(void * dummy)
{
unsigned long flags;
struct eeh_event *event;
- struct eeh_dev *edev;
+ struct eeh_pe *pe;
set_task_comm(current, "eehd");
@@ -76,28 +76,23 @@ static int eeh_event_handler(void * dummy)
/* Serialize processing of EEH events */
mutex_lock(&eeh_event_mutex);
- edev = event->edev;
- eeh_mark_slot(eeh_dev_to_of_node(edev), EEH_MODE_RECOVERING);
-
- printk(KERN_INFO "EEH: Detected PCI bus error on device %s\n",
- eeh_pci_name(edev->pdev));
+ pe = event->pe;
+ eeh_pe_state_mark(pe, EEH_PE_RECOVERING);
+ pr_info("EEH: Detected PCI bus error on PHB#%d-PE#%x\n",
+ pe->phb->global_number, pe->addr);
set_current_state(TASK_INTERRUPTIBLE); /* Don't add to load average */
- edev = handle_eeh_events(event);
-
- if (edev) {
- eeh_clear_slot(eeh_dev_to_of_node(edev), EEH_MODE_RECOVERING);
- pci_dev_put(edev->pdev);
- }
+ eeh_handle_event(pe);
+ eeh_pe_state_clear(pe, EEH_PE_RECOVERING);
kfree(event);
mutex_unlock(&eeh_event_mutex);
/* If there are no new errors after an hour, clear the counter. */
- if (edev && edev->freeze_count>0) {
+ if (pe && pe->freeze_count > 0) {
msleep_interruptible(3600*1000);
- if (edev->freeze_count>0)
- edev->freeze_count--;
+ if (pe->freeze_count > 0)
+ pe->freeze_count--;
}
@@ -119,36 +114,23 @@ static void eeh_thread_launcher(struct work_struct *dummy)
/**
* eeh_send_failure_event - Generate a PCI error event
- * @edev: EEH device
+ * @pe: EEH PE
*
* This routine can be called within an interrupt context;
* the actual event will be delivered in a normal context
* (from a workqueue).
*/
-int eeh_send_failure_event(struct eeh_dev *edev)
+int eeh_send_failure_event(struct eeh_pe *pe)
{
unsigned long flags;
struct eeh_event *event;
- struct device_node *dn = eeh_dev_to_of_node(edev);
- const char *location;
-
- if (!mem_init_done) {
- printk(KERN_ERR "EEH: event during early boot not handled\n");
- location = of_get_property(dn, "ibm,loc-code", NULL);
- printk(KERN_ERR "EEH: device node = %s\n", dn->full_name);
- printk(KERN_ERR "EEH: PCI location = %s\n", location);
- return 1;
- }
- event = kmalloc(sizeof(*event), GFP_ATOMIC);
- if (event == NULL) {
- printk(KERN_ERR "EEH: out of memory, event not handled\n");
- return 1;
- }
-
- if (edev->pdev)
- pci_dev_get(edev->pdev);
- event->edev = edev;
+ event = kzalloc(sizeof(*event), GFP_ATOMIC);
+ if (!event) {
+ pr_err("EEH: out of memory, event not handled\n");
+ return -ENOMEM;
+ }
+ event->pe = pe;
/* We may or may not be called in an interrupt context */
spin_lock_irqsave(&eeh_eventlist_lock, flags);
diff --git a/arch/powerpc/platforms/pseries/eeh_pe.c b/arch/powerpc/platforms/pseries/eeh_pe.c
new file mode 100644
index 000000000000..797cd181dc3f
--- /dev/null
+++ b/arch/powerpc/platforms/pseries/eeh_pe.c
@@ -0,0 +1,652 @@
+/*
+ * The file intends to implement PEs based on the information from
+ * the platforms. Basically, there are 3 types of PEs: PHB/Bus/Device.
+ * All the PEs are organized as a hierarchy tree. The first level
+ * of the tree is composed of the existing PHBs, since a particular
+ * PE is only meaningful in one PHB domain.
+ *
+ * Copyright Benjamin Herrenschmidt & Gavin Shan, IBM Corporation 2012.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ */
+
+#include <linux/export.h>
+#include <linux/gfp.h>
+#include <linux/init.h>
+#include <linux/kernel.h>
+#include <linux/pci.h>
+#include <linux/string.h>
+
+#include <asm/pci-bridge.h>
+#include <asm/ppc-pci.h>
+
+static LIST_HEAD(eeh_phb_pe);
+
+/**
+ * eeh_pe_alloc - Allocate PE
+ * @phb: PCI controller
+ * @type: PE type
+ *
+ * Allocate PE instance dynamically.
+ */
+static struct eeh_pe *eeh_pe_alloc(struct pci_controller *phb, int type)
+{
+ struct eeh_pe *pe;
+
+ /* Allocate PHB PE */
+ pe = kzalloc(sizeof(struct eeh_pe), GFP_KERNEL);
+ if (!pe) return NULL;
+
+ /* Initialize PHB PE */
+ pe->type = type;
+ pe->phb = phb;
+ INIT_LIST_HEAD(&pe->child_list);
+ INIT_LIST_HEAD(&pe->child);
+ INIT_LIST_HEAD(&pe->edevs);
+
+ return pe;
+}
+
+/**
+ * eeh_phb_pe_create - Create PHB PE
+ * @phb: PCI controller
+ *
+ * The function should be called when a PHB is detected during
+ * system boot or PCI hotplug, in order to create the PHB PE.
+ */
+int __devinit eeh_phb_pe_create(struct pci_controller *phb)
+{
+ struct eeh_pe *pe;
+
+ /* Allocate PHB PE */
+ pe = eeh_pe_alloc(phb, EEH_PE_PHB);
+ if (!pe) {
+ pr_err("%s: out of memory!\n", __func__);
+ return -ENOMEM;
+ }
+
+ /* Put it into the list */
+ eeh_lock();
+ list_add_tail(&pe->child, &eeh_phb_pe);
+ eeh_unlock();
+
+ pr_debug("EEH: Add PE for PHB#%d\n", phb->global_number);
+
+ return 0;
+}
+
+/**
+ * eeh_phb_pe_get - Retrieve PHB PE based on the given PHB
+ * @phb: PCI controller
+ *
+ * All the PEs form a hierarchy tree. The first layer of the
+ * hierarchy tree is composed of PHB PEs. The function is used
+ * to retrieve the corresponding PHB PE according to the given PHB.
+ */
+static struct eeh_pe *eeh_phb_pe_get(struct pci_controller *phb)
+{
+ struct eeh_pe *pe;
+
+ list_for_each_entry(pe, &eeh_phb_pe, child) {
+ /*
+ * Actually, we needn't check the type since
+ * the type of a PHB PE was fixed when it
+ * was created.
+ */
+ if ((pe->type & EEH_PE_PHB) && pe->phb == phb)
+ return pe;
+ }
+
+ return NULL;
+}
+
+/**
+ * eeh_pe_next - Retrieve the next PE in the tree
+ * @pe: current PE
+ * @root: root PE
+ *
+ * The function is used to retrieve the next PE in the
+ * PE hierarchy tree.
+ */
+static struct eeh_pe *eeh_pe_next(struct eeh_pe *pe,
+ struct eeh_pe *root)
+{
+ struct list_head *next = pe->child_list.next;
+
+ if (next == &pe->child_list) {
+ while (1) {
+ if (pe == root)
+ return NULL;
+ next = pe->child.next;
+ if (next != &pe->parent->child_list)
+ break;
+ pe = pe->parent;
+ }
+ }
+
+ return list_entry(next, struct eeh_pe, child);
+}
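Together with eeh_pe_traverse() below, eeh_pe_next() yields a pre-order (depth-first) walk of the PE tree. A worked example of the visit order, on a hypothetical topology:

	/*
	 *	PHB -+- busA --- dev1
	 *	     +- busB --- dev2
	 *
	 * Starting from the PHB PE, the walk visits PHB, busA, dev1,
	 * busB and dev2, then returns NULL once it climbs back up to
	 * the root.
	 */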
+
+/**
+ * eeh_pe_traverse - Traverse PEs in the specified PHB
+ * @root: root PE
+ * @fn: callback
+ * @flag: extra parameter to callback
+ *
+ * The function is used to traverse the specified PE and its
+ * child PEs. The traversal terminates once the callback
+ * returns something other than NULL, or when there are no
+ * more PEs to visit.
+ */
+static void *eeh_pe_traverse(struct eeh_pe *root,
+ eeh_traverse_func fn, void *flag)
+{
+ struct eeh_pe *pe;
+ void *ret;
+
+ for (pe = root; pe; pe = eeh_pe_next(pe, root)) {
+ ret = fn(pe, flag);
+ if (ret) return ret;
+ }
+
+ return NULL;
+}
+
+/**
+ * eeh_pe_dev_traverse - Traverse the devices from the PE
+ * @root: EEH PE
+ * @fn: function callback
+ * @flag: extra parameter to callback
+ *
+ * The function is used to traverse the devices of the specified
+ * PE and its child PEs.
+ */
+void *eeh_pe_dev_traverse(struct eeh_pe *root,
+ eeh_traverse_func fn, void *flag)
+{
+ struct eeh_pe *pe;
+ struct eeh_dev *edev;
+ void *ret;
+
+ if (!root) {
+ pr_warning("%s: Invalid PE %p\n", __func__, root);
+ return NULL;
+ }
+
+ eeh_lock();
+
+ /* Traverse root PE */
+ for (pe = root; pe; pe = eeh_pe_next(pe, root)) {
+ eeh_pe_for_each_dev(pe, edev) {
+ ret = fn(edev, flag);
+ if (ret) {
+ eeh_unlock();
+ return ret;
+ }
+ }
+ }
+
+ eeh_unlock();
+
+ return NULL;
+}
+
+/**
+ * __eeh_pe_get - Check the PE address
+ * @data: EEH PE
+ * @flag: EEH device
+ *
+ * For one particular PE, it can be identified by its PE address
+ * or its traditional BDF address, which is composed of the PCI
+ * Bus/Device/Function numbers. The extra data referred to by @flag
+ * indicates which type of address should be used.
+ */
+static void *__eeh_pe_get(void *data, void *flag)
+{
+ struct eeh_pe *pe = (struct eeh_pe *)data;
+ struct eeh_dev *edev = (struct eeh_dev *)flag;
+
+ /* Unexpected PHB PE */
+ if (pe->type & EEH_PE_PHB)
+ return NULL;
+
+ /* We prefer PE address */
+ if (edev->pe_config_addr &&
+ (edev->pe_config_addr == pe->addr))
+ return pe;
+
+ /* Try BDF address */
+ if (edev->pe_config_addr &&
+ (edev->config_addr == pe->config_addr))
+ return pe;
+
+ return NULL;
+}
+
+/**
+ * eeh_pe_get - Search PE based on the given address
+ * @edev: EEH device
+ *
+ * Search the corresponding PE based on the specified address which
+ * is included in the eeh device. The function is used to check if
+ * the associated PE has been created against the PE address. It's
+ * notable that the PE address has 2 formats: the traditional PE address,
+ * which is composed of the PCI bus/device/function numbers, or the unified
+ * PE address.
+ */
+static struct eeh_pe *eeh_pe_get(struct eeh_dev *edev)
+{
+ struct eeh_pe *root = eeh_phb_pe_get(edev->phb);
+ struct eeh_pe *pe;
+
+ pe = eeh_pe_traverse(root, __eeh_pe_get, edev);
+
+ return pe;
+}
+
+/**
+ * eeh_pe_get_parent - Retrieve the parent PE
+ * @edev: EEH device
+ *
+ * All the PEs existing in the system are organized as a hierarchy
+ * tree. The function is used to retrieve the parent PE according
+ * to the parent EEH device.
+ */
+static struct eeh_pe *eeh_pe_get_parent(struct eeh_dev *edev)
+{
+ struct device_node *dn;
+ struct eeh_dev *parent;
+
+ /*
+ * An indirect parent EEH device might already have an
+ * associated PE even though the direct parent EEH device
+ * doesn't have one yet.
+ */
+ dn = edev->dn->parent;
+ while (dn) {
+ /* We're poking out of PCI territory */
+ if (!PCI_DN(dn)) return NULL;
+
+ parent = of_node_to_eeh_dev(dn);
+ /* We're poking out of PCI territory */
+ if (!parent) return NULL;
+
+ if (parent->pe)
+ return parent->pe;
+
+ dn = dn->parent;
+ }
+
+ return NULL;
+}
+
+/**
+ * eeh_add_to_parent_pe - Add EEH device to parent PE
+ * @edev: EEH device
+ *
+ * Add EEH device to the parent PE. If the parent PE already
+ * exists, the PE type will be changed to EEH_PE_BUS. Otherwise,
+ * we have to create a new PE to hold the EEH device, and the new
+ * PE will be linked to its parent PE as well.
+ */
+int eeh_add_to_parent_pe(struct eeh_dev *edev)
+{
+ struct eeh_pe *pe, *parent;
+
+ eeh_lock();
+
+ /*
+ * Check whether the PE already exists according to the
+ * PE address. If it does, the PE should be composed of a
+ * PCI bus and its subordinate components.
+ */
+ pe = eeh_pe_get(edev);
+ if (pe && !(pe->type & EEH_PE_INVALID)) {
+ if (!edev->pe_config_addr) {
+ eeh_unlock();
+ pr_err("%s: PE with addr 0x%x already exists\n",
+ __func__, edev->config_addr);
+ return -EEXIST;
+ }
+
+ /* Mark the PE as a PCI bus PE */
+ pe->type = EEH_PE_BUS;
+ edev->pe = pe;
+
+ /* Put the edev to PE */
+ list_add_tail(&edev->list, &pe->edevs);
+ eeh_unlock();
+ pr_debug("EEH: Add %s to Bus PE#%x\n",
+ edev->dn->full_name, pe->addr);
+
+ return 0;
+ } else if (pe && (pe->type & EEH_PE_INVALID)) {
+ list_add_tail(&edev->list, &pe->edevs);
+ edev->pe = pe;
+ /*
+ * We get here because of PCI hotplug during EEH recovery.
+ * Clear EEH_PE_INVALID on this PE and all of its ancestors.
+ */
+ parent = pe;
+ while (parent) {
+ if (!(parent->type & EEH_PE_INVALID))
+ break;
+ parent->type &= ~EEH_PE_INVALID;
+ parent = parent->parent;
+ }
+ eeh_unlock();
+ pr_debug("EEH: Add %s to Device PE#%x, Parent PE#%x\n",
+ edev->dn->full_name, pe->addr, pe->parent->addr);
+
+ return 0;
+ }
+
+ /* Create a new EEH PE */
+ pe = eeh_pe_alloc(edev->phb, EEH_PE_DEVICE);
+ if (!pe) {
+ eeh_unlock();
+ pr_err("%s: out of memory!\n", __func__);
+ return -ENOMEM;
+ }
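+ /* Record both the PE address and the traditional config (BDF) address */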
+ pe->addr = edev->pe_config_addr;
+ pe->config_addr = edev->config_addr;
+
+ /*
+ * Put the new EEH PE into hierarchy tree. If the parent
+ * can't be found, the newly created PE will be attached
+ * to PHB directly. Otherwise, we have to associate the
+ * PE with its parent.
+ */
+ parent = eeh_pe_get_parent(edev);
+ if (!parent) {
+ parent = eeh_phb_pe_get(edev->phb);
+ if (!parent) {
+ eeh_unlock();
+ pr_err("%s: No PHB PE is found (PHB Domain=%d)\n",
+ __func__, edev->phb->global_number);
+ edev->pe = NULL;
+ kfree(pe);
+ return -EEXIST;
+ }
+ }
+ pe->parent = parent;
+
+ /*
+ * Put the newly created PE into the child list and
+ * link the EEH device accordingly.
+ */
+ list_add_tail(&pe->child, &parent->child_list);
+ list_add_tail(&edev->list, &pe->edevs);
+ edev->pe = pe;
+ eeh_unlock();
+ pr_debug("EEH: Add %s to Device PE#%x, Parent PE#%x\n",
+ edev->dn->full_name, pe->addr, pe->parent->addr);
+
+ return 0;
+}
+
+/**
+ * eeh_rmv_from_parent_pe - Remove one EEH device from the associated PE
+ * @edev: EEH device
+ * @purge_pe: remove PE or not
+ *
+ * The PE hierarchy tree might be changed when doing PCI hotplug.
+ * Also, the PCI devices or buses could be removed from the system
+ * during EEH recovery. This function removes the EEH device
+ * from its PE and deletes or invalidates the PE if necessary.
+ */
+int eeh_rmv_from_parent_pe(struct eeh_dev *edev, int purge_pe)
+{
+ struct eeh_pe *pe, *parent, *child;
+ int cnt;
+
+ if (!edev->pe) {
+ pr_warning("%s: No PE found for EEH device %s\n",
+ __func__, edev->dn->full_name);
+ return -EEXIST;
+ }
+
+ eeh_lock();
+
+ /* Remove the EEH device */
+ pe = edev->pe;
+ edev->pe = NULL;
+ list_del(&edev->list);
+
+ /*
+ * Walk up the tree: a PE that no longer has any EEH
+ * devices or child PEs is removed (or just marked as
+ * invalid), and the same check is then applied to its
+ * parent.
+ */
+ while (1) {
+ parent = pe->parent;
+ if (pe->type & EEH_PE_PHB)
+ break;
+
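+ /*
+ * In purge mode, PEs without devices and children are freed
+ * outright; otherwise they are only marked EEH_PE_INVALID so
+ * they can be reused when the devices return after recovery.
+ */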
+ if (purge_pe) {
+ if (list_empty(&pe->edevs) &&
+ list_empty(&pe->child_list)) {
+ list_del(&pe->child);
+ kfree(pe);
+ } else {
+ break;
+ }
+ } else {
+ if (list_empty(&pe->edevs)) {
+ cnt = 0;
+ /* Any valid child PE keeps this PE alive */
+ list_for_each_entry(child, &pe->child_list, child) {
+ if (!(child->type & EEH_PE_INVALID)) {
+ cnt++;
+ break;
+ }
+ }
+
+ if (!cnt)
+ pe->type |= EEH_PE_INVALID;
+ else
+ break;
+ }
+ }
+
+ pe = parent;
+ }
+
+ eeh_unlock();
+
+ return 0;
+}
+
+/**
+ * __eeh_pe_state_mark - Mark the state for the PE
+ * @data: EEH PE
+ * @flag: state
+ *
+ * The function is used to mark the indicated state for the given
+ * PE. The associated PCI devices are put into the I/O frozen
+ * state as well.
+ */
+static void *__eeh_pe_state_mark(void *data, void *flag)
+{
+ struct eeh_pe *pe = (struct eeh_pe *)data;
+ int state = *((int *)flag);
+ struct eeh_dev *tmp;
+ struct pci_dev *pdev;
+
+ /*
+ * Mark the PE with the indicated state. Also,
+ * the associated PCI device will be put into
+ * I/O frozen state to avoid I/O accesses from
+ * the PCI device driver.
+ */
+ pe->state |= state;
+ eeh_pe_for_each_dev(pe, tmp) {
+ pdev = eeh_dev_to_pci_dev(tmp);
+ if (pdev)
+ pdev->error_state = pci_channel_io_frozen;
+ }
+
+ return NULL;
+}
+
+/**
+ * eeh_pe_state_mark - Mark specified state for PE and its associated device
+ * @pe: EEH PE
+ * @state: state to be marked
+ *
+ * EEH error affects the current PE and its child PEs. The function
+ * is used to mark appropriate state for the affected PEs and the
+ * associated devices.
+ */
+void eeh_pe_state_mark(struct eeh_pe *pe, int state)
+{
+ eeh_lock();
+ eeh_pe_traverse(pe, __eeh_pe_state_mark, &state);
+ eeh_unlock();
+}
+
+/**
+ * __eeh_pe_state_clear - Clear state for the PE
+ * @data: EEH PE
+ * @flag: state
+ *
+ * The function is used to clear the indicated state from the
+ * given PE. The check count of the PE is reset as well.
+ */
+static void *__eeh_pe_state_clear(void *data, void *flag)
+{
+ struct eeh_pe *pe = (struct eeh_pe *)data;
+ int state = *((int *)flag);
+
+ pe->state &= ~state;
+ pe->check_count = 0;
+
+ return NULL;
+}
+
+/**
+ * eeh_pe_state_clear - Clear state for the PE and its children
+ * @pe: PE
+ * @state: state to be cleared
+ *
+ * Once the PE and its children have been recovered from an
+ * error, we need to clear the error state. The function is
+ * used for that purpose.
+ */
+void eeh_pe_state_clear(struct eeh_pe *pe, int state)
+{
+ eeh_lock();
+ eeh_pe_traverse(pe, __eeh_pe_state_clear, &state);
+ eeh_unlock();
+}
+
+/**
+ * eeh_restore_one_device_bars - Restore the Base Address Registers for one device
+ * @data: EEH device
+ * @flag: Unused
+ *
+ * Loads the PCI configuration space base address registers,
+ * the expansion ROM base address, the latency timer, etc.
+ * from the saved values in the device node.
+ */
+static void *eeh_restore_one_device_bars(void *data, void *flag)
+{
+ int i;
+ u32 cmd;
+ struct eeh_dev *edev = (struct eeh_dev *)data;
+ struct device_node *dn = eeh_dev_to_of_node(edev);
+
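+ /* Config space dwords 4-9 hold the six Base Address Registers */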
+ for (i = 4; i < 10; i++)
+ eeh_ops->write_config(dn, i*4, 4, edev->config_space[i]);
+ /* 12 == Expansion ROM Address */
+ eeh_ops->write_config(dn, 12*4, 4, edev->config_space[12]);
+
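+/*
+ * The config space is cached as an array of u32 values read with
+ * 32-bit accesses. Config space is little-endian while the cached
+ * words are in host (big-endian) order, so map a config-space byte
+ * offset to the matching byte index within the cached array.
+ */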
+#define BYTE_SWAP(OFF) (8*((OFF)/4)+3-(OFF))
+#define SAVED_BYTE(OFF) (((u8 *)(edev->config_space))[BYTE_SWAP(OFF)])
+
+ eeh_ops->write_config(dn, PCI_CACHE_LINE_SIZE, 1,
+ SAVED_BYTE(PCI_CACHE_LINE_SIZE));
+ eeh_ops->write_config(dn, PCI_LATENCY_TIMER, 1,
+ SAVED_BYTE(PCI_LATENCY_TIMER));
+
+ /* max latency, min grant, interrupt pin and line */
+ eeh_ops->write_config(dn, 15*4, 4, edev->config_space[15]);
+
+ /*
+ * Restore PERR & SERR bits, some devices require it,
+ * don't touch the other command bits
+ */
+ eeh_ops->read_config(dn, PCI_COMMAND, 4, &cmd);
+ if (edev->config_space[1] & PCI_COMMAND_PARITY)
+ cmd |= PCI_COMMAND_PARITY;
+ else
+ cmd &= ~PCI_COMMAND_PARITY;
+ if (edev->config_space[1] & PCI_COMMAND_SERR)
+ cmd |= PCI_COMMAND_SERR;
+ else
+ cmd &= ~PCI_COMMAND_SERR;
+ eeh_ops->write_config(dn, PCI_COMMAND, 4, cmd);
+
+ return NULL;
+}
+
+/**
+ * eeh_pe_restore_bars - Restore the PCI config space info
+ * @pe: EEH PE
+ *
+ * This routine performs a recursive walk to the children
+ * of this device as well.
+ */
+void eeh_pe_restore_bars(struct eeh_pe *pe)
+{
+ /*
+ * We needn't take the EEH lock since eeh_pe_dev_traverse()
+ * will take that.
+ */
+ eeh_pe_dev_traverse(pe, eeh_restore_one_device_bars, NULL);
+}
+
+/**
+ * eeh_pe_bus_get - Retrieve PCI bus according to the given PE
+ * @pe: EEH PE
+ *
+ * Retrieve the PCI bus according to the given PE. Basically,
+ * there are three types of PEs: PHB/Bus/Device. For a PHB PE,
+ * the primary PCI bus will be retrieved. The parent bus will
+ * be returned for a BUS PE. A DEVICE PE has no associated
+ * PCI bus.
+ */
+struct pci_bus *eeh_pe_bus_get(struct eeh_pe *pe)
+{
+ struct pci_bus *bus = NULL;
+ struct eeh_dev *edev;
+ struct pci_dev *pdev;
+
+ eeh_lock();
+
+ if (pe->type & EEH_PE_PHB) {
+ bus = pe->phb->bus;
+ } else if (pe->type & EEH_PE_BUS) {
+ edev = list_first_entry(&pe->edevs, struct eeh_dev, list);
+ pdev = eeh_dev_to_pci_dev(edev);
+ if (pdev)
+ bus = pdev->bus;
+ }
+
+ eeh_unlock();
+
+ return bus;
+}
diff --git a/arch/powerpc/platforms/pseries/eeh_pseries.c b/arch/powerpc/platforms/pseries/eeh_pseries.c
index c33360ec4f4f..19506f935737 100644
--- a/arch/powerpc/platforms/pseries/eeh_pseries.c
+++ b/arch/powerpc/platforms/pseries/eeh_pseries.c
@@ -129,27 +129,117 @@ static int pseries_eeh_init(void)
eeh_error_buf_size = RTAS_ERROR_LOG_MAX;
}
+ /* Set EEH probe mode */
+ eeh_probe_mode_set(EEH_PROBE_MODE_DEVTREE);
+
return 0;
}
/**
+ * pseries_eeh_of_probe - EEH probe on the given device
+ * @dn: OF node
+ * @flag: Unused
+ *
+ * When the EEH module is installed during system boot, all PCI
+ * devices are checked one by one to see whether they support EEH.
+ * The function is introduced for that purpose.
+ */
+static void *pseries_eeh_of_probe(struct device_node *dn, void *flag)
+{
+ struct eeh_dev *edev;
+ struct eeh_pe pe;
+ const u32 *class_code, *vendor_id, *device_id;
+ const u32 *regs;
+ int enable = 0;
+ int ret;
+
+ /* Retrieve OF node and eeh device */
+ edev = of_node_to_eeh_dev(dn);
+ if (!of_device_is_available(dn))
+ return NULL;
+
+ /* Retrieve class/vendor/device IDs */
+ class_code = of_get_property(dn, "class-code", NULL);
+ vendor_id = of_get_property(dn, "vendor-id", NULL);
+ device_id = of_get_property(dn, "device-id", NULL);
+
+ /* Skip for bad OF node or PCI-ISA bridge */
+ if (!class_code || !vendor_id || !device_id)
+ return NULL;
+ if (dn->type && !strcmp(dn->type, "isa"))
+ return NULL;
+
+ /* Update class code and mode of eeh device */
+ edev->class_code = *class_code;
+ edev->mode = 0;
+
+ /* Retrieve the device address */
+ regs = of_get_property(dn, "reg", NULL);
+ if (!regs) {
+ pr_warning("%s: OF node property %s::reg not found\n",
+ __func__, dn->full_name);
+ return NULL;
+ }
+
+ /* Initialize the fake PE */
+ memset(&pe, 0, sizeof(struct eeh_pe));
+ pe.phb = edev->phb;
+ pe.config_addr = regs[0];
+
+ /* Enable EEH on the device */
+ ret = eeh_ops->set_option(&pe, EEH_OPT_ENABLE);
+ if (!ret) {
+ edev->config_addr = regs[0];
+ /* Retrieve PE address */
+ edev->pe_config_addr = eeh_ops->get_pe_addr(&pe);
+ pe.addr = edev->pe_config_addr;
+
+ /* Some older systems (Power4) allow the ibm,set-eeh-option
+ * call to succeed even on nodes where EEH is not supported.
+ * Verify support explicitly.
+ */
+ ret = eeh_ops->get_state(&pe, NULL);
+ if (ret > 0 && ret != EEH_STATE_NOT_SUPPORT)
+ enable = 1;
+
+ if (enable) {
+ eeh_subsystem_enabled = 1;
+ eeh_add_to_parent_pe(edev);
+
+ pr_debug("%s: EEH enabled on %s PHB#%d-PE#%x, config addr#%x\n",
+ __func__, dn->full_name, pe.phb->global_number,
+ pe.addr, pe.config_addr);
+ } else if (dn->parent && of_node_to_eeh_dev(dn->parent) &&
+ (of_node_to_eeh_dev(dn->parent))->pe) {
+ /* This device doesn't support EEH, but it may have an
+ * EEH parent, in which case we mark it as supported.
+ */
+ edev->config_addr = of_node_to_eeh_dev(dn->parent)->config_addr;
+ edev->pe_config_addr = of_node_to_eeh_dev(dn->parent)->pe_config_addr;
+ eeh_add_to_parent_pe(edev);
+ }
+ }
+
+ /* Save memory bars */
+ eeh_save_bars(edev);
+
+ return NULL;
+}
+
+/**
* pseries_eeh_set_option - Initialize EEH or MMIO/DMA reenable
- * @dn: device node
+ * @pe: EEH PE
* @option: operation to be issued
*
* The function is used to control the EEH functionality globally.
* Currently, following options are support according to PAPR:
* Enable EEH, Disable EEH, Enable MMIO and Enable DMA
*/
-static int pseries_eeh_set_option(struct device_node *dn, int option)
+static int pseries_eeh_set_option(struct eeh_pe *pe, int option)
{
int ret = 0;
- struct eeh_dev *edev;
- const u32 *reg;
int config_addr;
- edev = of_node_to_eeh_dev(dn);
-
/*
* When we're enabling or disabling EEH functioality on
* the particular PE, the PE config address is possibly
@@ -159,15 +249,11 @@ static int pseries_eeh_set_option(struct device_node *dn, int option)
switch (option) {
case EEH_OPT_DISABLE:
case EEH_OPT_ENABLE:
- reg = of_get_property(dn, "reg", NULL);
- config_addr = reg[0];
- break;
-
case EEH_OPT_THAW_MMIO:
case EEH_OPT_THAW_DMA:
- config_addr = edev->config_addr;
- if (edev->pe_config_addr)
- config_addr = edev->pe_config_addr;
+ config_addr = pe->config_addr;
+ if (pe->addr)
+ config_addr = pe->addr;
break;
default:
@@ -177,15 +263,15 @@ static int pseries_eeh_set_option(struct device_node *dn, int option)
}
ret = rtas_call(ibm_set_eeh_option, 4, 1, NULL,
- config_addr, BUID_HI(edev->phb->buid),
- BUID_LO(edev->phb->buid), option);
+ config_addr, BUID_HI(pe->phb->buid),
+ BUID_LO(pe->phb->buid), option);
return ret;
}
/**
* pseries_eeh_get_pe_addr - Retrieve PE address
- * @dn: device node
+ * @pe: EEH PE
*
* Retrieve the assocated PE address. Actually, there're 2 RTAS
* function calls dedicated for the purpose. We need implement
@@ -196,14 +282,11 @@ static int pseries_eeh_set_option(struct device_node *dn, int option)
* It's notable that zero'ed return value means invalid PE config
* address.
*/
-static int pseries_eeh_get_pe_addr(struct device_node *dn)
+static int pseries_eeh_get_pe_addr(struct eeh_pe *pe)
{
- struct eeh_dev *edev;
int ret = 0;
int rets[3];
- edev = of_node_to_eeh_dev(dn);
-
if (ibm_get_config_addr_info2 != RTAS_UNKNOWN_SERVICE) {
/*
* First of all, we need to make sure there has one PE
@@ -211,18 +294,18 @@ static int pseries_eeh_get_pe_addr(struct device_node *dn)
* meaningless.
*/
ret = rtas_call(ibm_get_config_addr_info2, 4, 2, rets,
- edev->config_addr, BUID_HI(edev->phb->buid),
- BUID_LO(edev->phb->buid), 1);
+ pe->config_addr, BUID_HI(pe->phb->buid),
+ BUID_LO(pe->phb->buid), 1);
if (ret || (rets[0] == 0))
return 0;
/* Retrieve the associated PE config address */
ret = rtas_call(ibm_get_config_addr_info2, 4, 2, rets,
- edev->config_addr, BUID_HI(edev->phb->buid),
- BUID_LO(edev->phb->buid), 0);
+ pe->config_addr, BUID_HI(pe->phb->buid),
+ BUID_LO(pe->phb->buid), 0);
if (ret) {
- pr_warning("%s: Failed to get PE address for %s\n",
- __func__, dn->full_name);
+ pr_warning("%s: Failed to get address for PHB#%d-PE#%x\n",
+ __func__, pe->phb->global_number, pe->config_addr);
return 0;
}
@@ -231,11 +314,11 @@ static int pseries_eeh_get_pe_addr(struct device_node *dn)
if (ibm_get_config_addr_info != RTAS_UNKNOWN_SERVICE) {
ret = rtas_call(ibm_get_config_addr_info, 4, 2, rets,
- edev->config_addr, BUID_HI(edev->phb->buid),
- BUID_LO(edev->phb->buid), 0);
+ pe->config_addr, BUID_HI(pe->phb->buid),
+ BUID_LO(pe->phb->buid), 0);
if (ret) {
- pr_warning("%s: Failed to get PE address for %s\n",
- __func__, dn->full_name);
+ pr_warning("%s: Failed to get address for PHB#%d-PE#%x\n",
+ __func__, pe->phb->global_number, pe->config_addr);
return 0;
}
@@ -247,7 +330,7 @@ static int pseries_eeh_get_pe_addr(struct device_node *dn)
/**
* pseries_eeh_get_state - Retrieve PE state
- * @dn: PE associated device node
+ * @pe: EEH PE
* @state: return value
*
* Retrieve the state of the specified PE. On RTAS compliant
@@ -258,30 +341,28 @@ static int pseries_eeh_get_pe_addr(struct device_node *dn)
* RTAS calls for the purpose, we need to try the new one and back
* to the old one if the new one couldn't work properly.
*/
-static int pseries_eeh_get_state(struct device_node *dn, int *state)
+static int pseries_eeh_get_state(struct eeh_pe *pe, int *state)
{
- struct eeh_dev *edev;
int config_addr;
int ret;
int rets[4];
int result;
/* Figure out PE config address if possible */
- edev = of_node_to_eeh_dev(dn);
- config_addr = edev->config_addr;
- if (edev->pe_config_addr)
- config_addr = edev->pe_config_addr;
+ config_addr = pe->config_addr;
+ if (pe->addr)
+ config_addr = pe->addr;
if (ibm_read_slot_reset_state2 != RTAS_UNKNOWN_SERVICE) {
ret = rtas_call(ibm_read_slot_reset_state2, 3, 4, rets,
- config_addr, BUID_HI(edev->phb->buid),
- BUID_LO(edev->phb->buid));
+ config_addr, BUID_HI(pe->phb->buid),
+ BUID_LO(pe->phb->buid));
} else if (ibm_read_slot_reset_state != RTAS_UNKNOWN_SERVICE) {
/* Fake PE unavailable info */
rets[2] = 0;
ret = rtas_call(ibm_read_slot_reset_state, 3, 3, rets,
- config_addr, BUID_HI(edev->phb->buid),
- BUID_LO(edev->phb->buid));
+ config_addr, BUID_HI(pe->phb->buid),
+ BUID_LO(pe->phb->buid));
} else {
return EEH_STATE_NOT_SUPPORT;
}
@@ -333,34 +414,32 @@ static int pseries_eeh_get_state(struct device_node *dn, int *state)
/**
* pseries_eeh_reset - Reset the specified PE
- * @dn: PE associated device node
+ * @pe: EEH PE
* @option: reset option
*
* Reset the specified PE
*/
-static int pseries_eeh_reset(struct device_node *dn, int option)
+static int pseries_eeh_reset(struct eeh_pe *pe, int option)
{
- struct eeh_dev *edev;
int config_addr;
int ret;
/* Figure out PE address */
- edev = of_node_to_eeh_dev(dn);
- config_addr = edev->config_addr;
- if (edev->pe_config_addr)
- config_addr = edev->pe_config_addr;
+ config_addr = pe->config_addr;
+ if (pe->addr)
+ config_addr = pe->addr;
/* Reset PE through RTAS call */
ret = rtas_call(ibm_set_slot_reset, 4, 1, NULL,
- config_addr, BUID_HI(edev->phb->buid),
- BUID_LO(edev->phb->buid), option);
+ config_addr, BUID_HI(pe->phb->buid),
+ BUID_LO(pe->phb->buid), option);
/* If fundamental-reset not supported, try hot-reset */
if (option == EEH_RESET_FUNDAMENTAL &&
ret == -8) {
ret = rtas_call(ibm_set_slot_reset, 4, 1, NULL,
- config_addr, BUID_HI(edev->phb->buid),
- BUID_LO(edev->phb->buid), EEH_RESET_HOT);
+ config_addr, BUID_HI(pe->phb->buid),
+ BUID_LO(pe->phb->buid), EEH_RESET_HOT);
}
return ret;
@@ -368,13 +447,13 @@ static int pseries_eeh_reset(struct device_node *dn, int option)
/**
* pseries_eeh_wait_state - Wait for PE state
- * @dn: PE associated device node
+ * @pe: EEH PE
* @max_wait: maximal period in microsecond
*
* Wait for the state of associated PE. It might take some time
* to retrieve the PE's state.
*/
-static int pseries_eeh_wait_state(struct device_node *dn, int max_wait)
+static int pseries_eeh_wait_state(struct eeh_pe *pe, int max_wait)
{
int ret;
int mwait;
@@ -391,7 +470,7 @@ static int pseries_eeh_wait_state(struct device_node *dn, int max_wait)
#define EEH_STATE_MAX_WAIT_TIME (300 * 1000)
while (1) {
- ret = pseries_eeh_get_state(dn, &mwait);
+ ret = pseries_eeh_get_state(pe, &mwait);
/*
* If the PE's state is temporarily unavailable,
@@ -426,7 +505,7 @@ static int pseries_eeh_wait_state(struct device_node *dn, int max_wait)
/**
* pseries_eeh_get_log - Retrieve error log
- * @dn: device node
+ * @pe: EEH PE
* @severity: temporary or permanent error log
* @drv_log: driver log to be combined with retrieved error log
* @len: length of driver log
@@ -435,24 +514,22 @@ static int pseries_eeh_wait_state(struct device_node *dn, int max_wait)
* Actually, the error will be retrieved through the dedicated
* RTAS call.
*/
-static int pseries_eeh_get_log(struct device_node *dn, int severity, char *drv_log, unsigned long len)
+static int pseries_eeh_get_log(struct eeh_pe *pe, int severity, char *drv_log, unsigned long len)
{
- struct eeh_dev *edev;
int config_addr;
unsigned long flags;
int ret;
- edev = of_node_to_eeh_dev(dn);
spin_lock_irqsave(&slot_errbuf_lock, flags);
memset(slot_errbuf, 0, eeh_error_buf_size);
/* Figure out the PE address */
- config_addr = edev->config_addr;
- if (edev->pe_config_addr)
- config_addr = edev->pe_config_addr;
+ config_addr = pe->config_addr;
+ if (pe->addr)
+ config_addr = pe->addr;
ret = rtas_call(ibm_slot_error_detail, 8, 1, NULL, config_addr,
- BUID_HI(edev->phb->buid), BUID_LO(edev->phb->buid),
+ BUID_HI(pe->phb->buid), BUID_LO(pe->phb->buid),
virt_to_phys(drv_log), len,
virt_to_phys(slot_errbuf), eeh_error_buf_size,
severity);
@@ -465,40 +542,38 @@ static int pseries_eeh_get_log(struct device_node *dn, int severity, char *drv_l
/**
* pseries_eeh_configure_bridge - Configure PCI bridges in the indicated PE
- * @dn: PE associated device node
+ * @pe: EEH PE
*
* The function will be called to reconfigure the bridges included
* in the specified PE so that the mulfunctional PE would be recovered
* again.
*/
-static int pseries_eeh_configure_bridge(struct device_node *dn)
+static int pseries_eeh_configure_bridge(struct eeh_pe *pe)
{
- struct eeh_dev *edev;
int config_addr;
int ret;
/* Figure out the PE address */
- edev = of_node_to_eeh_dev(dn);
- config_addr = edev->config_addr;
- if (edev->pe_config_addr)
- config_addr = edev->pe_config_addr;
+ config_addr = pe->config_addr;
+ if (pe->addr)
+ config_addr = pe->addr;
/* Use new configure-pe function, if supported */
if (ibm_configure_pe != RTAS_UNKNOWN_SERVICE) {
ret = rtas_call(ibm_configure_pe, 3, 1, NULL,
- config_addr, BUID_HI(edev->phb->buid),
- BUID_LO(edev->phb->buid));
+ config_addr, BUID_HI(pe->phb->buid),
+ BUID_LO(pe->phb->buid));
} else if (ibm_configure_bridge != RTAS_UNKNOWN_SERVICE) {
ret = rtas_call(ibm_configure_bridge, 3, 1, NULL,
- config_addr, BUID_HI(edev->phb->buid),
- BUID_LO(edev->phb->buid));
+ config_addr, BUID_HI(pe->phb->buid),
+ BUID_LO(pe->phb->buid));
} else {
return -EFAULT;
}
if (ret)
- pr_warning("%s: Unable to configure bridge %d for %s\n",
- __func__, ret, dn->full_name);
+ pr_warning("%s: Unable to configure bridge PHB#%d-PE#%x (%d)\n",
+ __func__, pe->phb->global_number, pe->addr, ret);
return ret;
}
@@ -542,6 +617,8 @@ static int pseries_eeh_write_config(struct device_node *dn, int where, int size,
static struct eeh_ops pseries_eeh_ops = {
.name = "pseries",
.init = pseries_eeh_init,
+ .of_probe = pseries_eeh_of_probe,
+ .dev_probe = NULL,
.set_option = pseries_eeh_set_option,
.get_pe_addr = pseries_eeh_get_pe_addr,
.get_state = pseries_eeh_get_state,
@@ -559,7 +636,21 @@ static struct eeh_ops pseries_eeh_ops = {
* EEH initialization on pseries platform. This function should be
* called before any EEH related functions.
*/
-int __init eeh_pseries_init(void)
+static int __init eeh_pseries_init(void)
{
- return eeh_ops_register(&pseries_eeh_ops);
+ int ret = -EINVAL;
+
+ if (!machine_is(pseries))
+ return ret;
+
+ ret = eeh_ops_register(&pseries_eeh_ops);
+ if (!ret)
+ pr_info("EEH: pSeries platform initialized\n");
+ else
+ pr_info("EEH: pSeries platform initialization failure (%d)\n",
+ ret);
+
+ return ret;
}
+
+early_initcall(eeh_pseries_init);
diff --git a/arch/powerpc/platforms/pseries/eeh_sysfs.c b/arch/powerpc/platforms/pseries/eeh_sysfs.c
index 243b3510d70f..d37708360f2e 100644
--- a/arch/powerpc/platforms/pseries/eeh_sysfs.c
+++ b/arch/powerpc/platforms/pseries/eeh_sysfs.c
@@ -53,9 +53,6 @@ static DEVICE_ATTR(_name, S_IRUGO, eeh_show_##_name, NULL);
EEH_SHOW_ATTR(eeh_mode, mode, "0x%x");
EEH_SHOW_ATTR(eeh_config_addr, config_addr, "0x%x");
EEH_SHOW_ATTR(eeh_pe_config_addr, pe_config_addr, "0x%x");
-EEH_SHOW_ATTR(eeh_check_count, check_count, "%d" );
-EEH_SHOW_ATTR(eeh_freeze_count, freeze_count, "%d" );
-EEH_SHOW_ATTR(eeh_false_positives, false_positives, "%d" );
void eeh_sysfs_add_device(struct pci_dev *pdev)
{
@@ -64,9 +61,6 @@ void eeh_sysfs_add_device(struct pci_dev *pdev)
rc += device_create_file(&pdev->dev, &dev_attr_eeh_mode);
rc += device_create_file(&pdev->dev, &dev_attr_eeh_config_addr);
rc += device_create_file(&pdev->dev, &dev_attr_eeh_pe_config_addr);
- rc += device_create_file(&pdev->dev, &dev_attr_eeh_check_count);
- rc += device_create_file(&pdev->dev, &dev_attr_eeh_false_positives);
- rc += device_create_file(&pdev->dev, &dev_attr_eeh_freeze_count);
if (rc)
printk(KERN_WARNING "EEH: Unable to create sysfs entries\n");
@@ -77,8 +71,5 @@ void eeh_sysfs_remove_device(struct pci_dev *pdev)
device_remove_file(&pdev->dev, &dev_attr_eeh_mode);
device_remove_file(&pdev->dev, &dev_attr_eeh_config_addr);
device_remove_file(&pdev->dev, &dev_attr_eeh_pe_config_addr);
- device_remove_file(&pdev->dev, &dev_attr_eeh_check_count);
- device_remove_file(&pdev->dev, &dev_attr_eeh_false_positives);
- device_remove_file(&pdev->dev, &dev_attr_eeh_freeze_count);
}
diff --git a/arch/powerpc/platforms/pseries/iommu.c b/arch/powerpc/platforms/pseries/iommu.c
index bca220f2873c..6153eea27ce7 100644
--- a/arch/powerpc/platforms/pseries/iommu.c
+++ b/arch/powerpc/platforms/pseries/iommu.c
@@ -28,6 +28,7 @@
#include <linux/types.h>
#include <linux/slab.h>
#include <linux/mm.h>
+#include <linux/memblock.h>
#include <linux/spinlock.h>
#include <linux/sched.h> /* for show_stack */
#include <linux/string.h>
@@ -41,7 +42,6 @@
#include <asm/iommu.h>
#include <asm/pci-bridge.h>
#include <asm/machdep.h>
-#include <asm/abs_addr.h>
#include <asm/pSeries_reconfig.h>
#include <asm/firmware.h>
#include <asm/tce.h>
@@ -99,7 +99,7 @@ static int tce_build_pSeries(struct iommu_table *tbl, long index,
while (npages--) {
/* can't move this out since we might cross MEMBLOCK boundary */
- rpn = (virt_to_abs(uaddr)) >> TCE_SHIFT;
+ rpn = __pa(uaddr) >> TCE_SHIFT;
*tcep = proto_tce | (rpn & TCE_RPN_MASK) << TCE_RPN_SHIFT;
uaddr += TCE_PAGE_SIZE;
@@ -148,7 +148,7 @@ static int tce_build_pSeriesLP(struct iommu_table *tbl, long tcenum,
int ret = 0;
long tcenum_start = tcenum, npages_start = npages;
- rpn = (virt_to_abs(uaddr)) >> TCE_SHIFT;
+ rpn = __pa(uaddr) >> TCE_SHIFT;
proto_tce = TCE_PCI_READ;
if (direction != DMA_TO_DEVICE)
proto_tce |= TCE_PCI_WRITE;
@@ -217,7 +217,7 @@ static int tce_buildmulti_pSeriesLP(struct iommu_table *tbl, long tcenum,
__get_cpu_var(tce_page) = tcep;
}
- rpn = (virt_to_abs(uaddr)) >> TCE_SHIFT;
+ rpn = __pa(uaddr) >> TCE_SHIFT;
proto_tce = TCE_PCI_READ;
if (direction != DMA_TO_DEVICE)
proto_tce |= TCE_PCI_WRITE;
@@ -237,7 +237,7 @@ static int tce_buildmulti_pSeriesLP(struct iommu_table *tbl, long tcenum,
rc = plpar_tce_put_indirect((u64)tbl->it_index,
(u64)tcenum << 12,
- (u64)virt_to_abs(tcep),
+ (u64)__pa(tcep),
limit);
npages -= limit;
@@ -441,7 +441,7 @@ static int tce_setrange_multi_pSeriesLP(unsigned long start_pfn,
rc = plpar_tce_put_indirect(liobn,
dma_offset,
- (u64)virt_to_abs(tcep),
+ (u64)__pa(tcep),
limit);
num_tce -= limit;
diff --git a/arch/powerpc/platforms/pseries/lpar.c b/arch/powerpc/platforms/pseries/lpar.c
index 5f3ef876ded2..0da39fed355a 100644
--- a/arch/powerpc/platforms/pseries/lpar.c
+++ b/arch/powerpc/platforms/pseries/lpar.c
@@ -31,7 +31,6 @@
#include <asm/page.h>
#include <asm/pgtable.h>
#include <asm/machdep.h>
-#include <asm/abs_addr.h>
#include <asm/mmu_context.h>
#include <asm/iommu.h>
#include <asm/tlbflush.h>
@@ -108,9 +107,9 @@ void vpa_init(int cpu)
}
static long pSeries_lpar_hpte_insert(unsigned long hpte_group,
- unsigned long va, unsigned long pa,
- unsigned long rflags, unsigned long vflags,
- int psize, int ssize)
+ unsigned long vpn, unsigned long pa,
+ unsigned long rflags, unsigned long vflags,
+ int psize, int ssize)
{
unsigned long lpar_rc;
unsigned long flags;
@@ -118,11 +117,11 @@ static long pSeries_lpar_hpte_insert(unsigned long hpte_group,
unsigned long hpte_v, hpte_r;
if (!(vflags & HPTE_V_BOLTED))
- pr_devel("hpte_insert(group=%lx, va=%016lx, pa=%016lx, "
- "rflags=%lx, vflags=%lx, psize=%d)\n",
- hpte_group, va, pa, rflags, vflags, psize);
+ pr_devel("hpte_insert(group=%lx, vpn=%016lx, "
+ "pa=%016lx, rflags=%lx, vflags=%lx, psize=%d)\n",
+ hpte_group, vpn, pa, rflags, vflags, psize);
- hpte_v = hpte_encode_v(va, psize, ssize) | vflags | HPTE_V_VALID;
+ hpte_v = hpte_encode_v(vpn, psize, ssize) | vflags | HPTE_V_VALID;
hpte_r = hpte_encode_r(pa, psize) | rflags;
if (!(vflags & HPTE_V_BOLTED))
@@ -227,22 +226,6 @@ static void pSeries_lpar_hptab_clear(void)
}
/*
- * This computes the AVPN and B fields of the first dword of a HPTE,
- * for use when we want to match an existing PTE. The bottom 7 bits
- * of the returned value are zero.
- */
-static inline unsigned long hpte_encode_avpn(unsigned long va, int psize,
- int ssize)
-{
- unsigned long v;
-
- v = (va >> 23) & ~(mmu_psize_defs[psize].avpnm);
- v <<= HPTE_V_AVPN_SHIFT;
- v |= ((unsigned long) ssize) << HPTE_V_SSIZE_SHIFT;
- return v;
-}
-
-/*
* NOTE: for updatepp ops we are fortunate that the linux "newpp" bits and
* the low 3 bits of flags happen to line up. So no transform is needed.
* We can probably optimize here and assume the high bits of newpp are
@@ -250,14 +233,14 @@ static inline unsigned long hpte_encode_avpn(unsigned long va, int psize,
*/
static long pSeries_lpar_hpte_updatepp(unsigned long slot,
unsigned long newpp,
- unsigned long va,
+ unsigned long vpn,
int psize, int ssize, int local)
{
unsigned long lpar_rc;
unsigned long flags = (newpp & 7) | H_AVPN;
unsigned long want_v;
- want_v = hpte_encode_avpn(va, psize, ssize);
+ want_v = hpte_encode_avpn(vpn, psize, ssize);
pr_devel(" update: avpnv=%016lx, hash=%016lx, f=%lx, psize: %d ...",
want_v, slot, flags, psize);
@@ -295,15 +278,15 @@ static unsigned long pSeries_lpar_hpte_getword0(unsigned long slot)
return dword0;
}
-static long pSeries_lpar_hpte_find(unsigned long va, int psize, int ssize)
+static long pSeries_lpar_hpte_find(unsigned long vpn, int psize, int ssize)
{
unsigned long hash;
unsigned long i;
long slot;
unsigned long want_v, hpte_v;
- hash = hpt_hash(va, mmu_psize_defs[psize].shift, ssize);
- want_v = hpte_encode_avpn(va, psize, ssize);
+ hash = hpt_hash(vpn, mmu_psize_defs[psize].shift, ssize);
+ want_v = hpte_encode_avpn(vpn, psize, ssize);
/* Bolted entries are always in the primary group */
slot = (hash & htab_hash_mask) * HPTES_PER_GROUP;
@@ -323,12 +306,13 @@ static void pSeries_lpar_hpte_updateboltedpp(unsigned long newpp,
unsigned long ea,
int psize, int ssize)
{
- unsigned long lpar_rc, slot, vsid, va, flags;
+ unsigned long vpn;
+ unsigned long lpar_rc, slot, vsid, flags;
vsid = get_kernel_vsid(ea, ssize);
- va = hpt_va(ea, vsid, ssize);
+ vpn = hpt_vpn(ea, vsid, ssize);
- slot = pSeries_lpar_hpte_find(va, psize, ssize);
+ slot = pSeries_lpar_hpte_find(vpn, psize, ssize);
BUG_ON(slot == -1);
flags = newpp & 7;
@@ -337,17 +321,17 @@ static void pSeries_lpar_hpte_updateboltedpp(unsigned long newpp,
BUG_ON(lpar_rc != H_SUCCESS);
}
-static void pSeries_lpar_hpte_invalidate(unsigned long slot, unsigned long va,
+static void pSeries_lpar_hpte_invalidate(unsigned long slot, unsigned long vpn,
int psize, int ssize, int local)
{
unsigned long want_v;
unsigned long lpar_rc;
unsigned long dummy1, dummy2;
- pr_devel(" inval : slot=%lx, va=%016lx, psize: %d, local: %d\n",
- slot, va, psize, local);
+ pr_devel(" inval : slot=%lx, vpn=%016lx, psize: %d, local: %d\n",
+ slot, vpn, psize, local);
- want_v = hpte_encode_avpn(va, psize, ssize);
+ want_v = hpte_encode_avpn(vpn, psize, ssize);
lpar_rc = plpar_pte_remove(H_AVPN, slot, want_v, &dummy1, &dummy2);
if (lpar_rc == H_NOT_FOUND)
return;
@@ -358,15 +342,16 @@ static void pSeries_lpar_hpte_invalidate(unsigned long slot, unsigned long va,
static void pSeries_lpar_hpte_removebolted(unsigned long ea,
int psize, int ssize)
{
- unsigned long slot, vsid, va;
+ unsigned long vpn;
+ unsigned long slot, vsid;
vsid = get_kernel_vsid(ea, ssize);
- va = hpt_va(ea, vsid, ssize);
+ vpn = hpt_vpn(ea, vsid, ssize);
- slot = pSeries_lpar_hpte_find(va, psize, ssize);
+ slot = pSeries_lpar_hpte_find(vpn, psize, ssize);
BUG_ON(slot == -1);
- pSeries_lpar_hpte_invalidate(slot, va, psize, ssize, 0);
+ pSeries_lpar_hpte_invalidate(slot, vpn, psize, ssize, 0);
}
/* Flag bits for H_BULK_REMOVE */
@@ -382,12 +367,12 @@ static void pSeries_lpar_hpte_removebolted(unsigned long ea,
*/
static void pSeries_lpar_flush_hash_range(unsigned long number, int local)
{
+ unsigned long vpn;
unsigned long i, pix, rc;
unsigned long flags = 0;
struct ppc64_tlb_batch *batch = &__get_cpu_var(ppc64_tlb_batch);
int lock_tlbie = !mmu_has_feature(MMU_FTR_LOCKLESS_TLBIE);
unsigned long param[9];
- unsigned long va;
unsigned long hash, index, shift, hidx, slot;
real_pte_t pte;
int psize, ssize;
@@ -399,21 +384,21 @@ static void pSeries_lpar_flush_hash_range(unsigned long number, int local)
ssize = batch->ssize;
pix = 0;
for (i = 0; i < number; i++) {
- va = batch->vaddr[i];
+ vpn = batch->vpn[i];
pte = batch->pte[i];
- pte_iterate_hashed_subpages(pte, psize, va, index, shift) {
- hash = hpt_hash(va, shift, ssize);
+ pte_iterate_hashed_subpages(pte, psize, vpn, index, shift) {
+ hash = hpt_hash(vpn, shift, ssize);
hidx = __rpte_to_hidx(pte, index);
if (hidx & _PTEIDX_SECONDARY)
hash = ~hash;
slot = (hash & htab_hash_mask) * HPTES_PER_GROUP;
slot += hidx & _PTEIDX_GROUP_IX;
if (!firmware_has_feature(FW_FEATURE_BULK_REMOVE)) {
- pSeries_lpar_hpte_invalidate(slot, va, psize,
+ pSeries_lpar_hpte_invalidate(slot, vpn, psize,
ssize, local);
} else {
param[pix] = HBR_REQUEST | HBR_AVPN | slot;
- param[pix+1] = hpte_encode_avpn(va, psize,
+ param[pix+1] = hpte_encode_avpn(vpn, psize,
ssize);
pix += 2;
if (pix == 8) {
diff --git a/arch/powerpc/platforms/pseries/msi.c b/arch/powerpc/platforms/pseries/msi.c
index 109fdb75578d..d19f4977c834 100644
--- a/arch/powerpc/platforms/pseries/msi.c
+++ b/arch/powerpc/platforms/pseries/msi.c
@@ -210,6 +210,7 @@ static struct device_node *find_pe_total_msi(struct pci_dev *dev, int *total)
static struct device_node *find_pe_dn(struct pci_dev *dev, int *total)
{
struct device_node *dn;
+ struct eeh_dev *edev;
/* Found our PE and assume 8 at that point. */
@@ -217,7 +218,10 @@ static struct device_node *find_pe_dn(struct pci_dev *dev, int *total)
if (!dn)
return NULL;
- dn = eeh_find_device_pe(dn);
+ /* Get the top level device in the PE */
+ edev = of_node_to_eeh_dev(dn);
+ edev = list_first_entry(&edev->pe->edevs, struct eeh_dev, list);
+ dn = eeh_dev_to_of_node(edev);
if (!dn)
return NULL;
@@ -387,12 +391,13 @@ static int check_msix_entries(struct pci_dev *pdev)
return 0;
}
-static int rtas_setup_msi_irqs(struct pci_dev *pdev, int nvec, int type)
+static int rtas_setup_msi_irqs(struct pci_dev *pdev, int nvec_in, int type)
{
struct pci_dn *pdn;
int hwirq, virq, i, rc;
struct msi_desc *entry;
struct msi_msg msg;
+ int nvec = nvec_in;
pdn = get_pdn(pdev);
if (!pdn)
@@ -402,10 +407,23 @@ static int rtas_setup_msi_irqs(struct pci_dev *pdev, int nvec, int type)
return -EINVAL;
/*
+ * Firmware currently refuses any non-power-of-two allocation,
+ * so we round up if the quota allows it.
+ */
+ if (type == PCI_CAP_ID_MSIX) {
+ int m = roundup_pow_of_two(nvec);
+ int quota = msi_quota_for_device(pdev, m);
+
+ if (quota >= m)
+ nvec = m;
+ }
+
+ /*
* Try the new more explicit firmware interface, if that fails fall
* back to the old interface. The old interface is known to never
* return MSI-Xs.
*/
+again:
if (type == PCI_CAP_ID_MSI) {
rc = rtas_change_msi(pdn, RTAS_CHANGE_MSI_FN, nvec);
@@ -417,6 +435,10 @@ static int rtas_setup_msi_irqs(struct pci_dev *pdev, int nvec, int type)
rc = rtas_change_msi(pdn, RTAS_CHANGE_MSIX_FN, nvec);
if (rc != nvec) {
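+ /* The rounded-up request was refused; retry with the original count */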
+ if (nvec != nvec_in) {
+ nvec = nvec_in;
+ goto again;
+ }
pr_debug("rtas_msi: rtas_change_msi() failed\n");
return rc;
}
diff --git a/arch/powerpc/platforms/pseries/pci.c b/arch/powerpc/platforms/pseries/pci.c
index 2c6ded29f73d..56b864d777ee 100644
--- a/arch/powerpc/platforms/pseries/pci.c
+++ b/arch/powerpc/platforms/pseries/pci.c
@@ -73,7 +73,7 @@ void __init pSeries_final_fixup(void)
{
pSeries_request_regions();
- pci_addr_cache_build();
+ eeh_addr_cache_build();
}
/*
diff --git a/arch/powerpc/platforms/pseries/pci_dlpar.c b/arch/powerpc/platforms/pseries/pci_dlpar.c
index 3ccebc83dc02..261a577a3dd2 100644
--- a/arch/powerpc/platforms/pseries/pci_dlpar.c
+++ b/arch/powerpc/platforms/pseries/pci_dlpar.c
@@ -65,27 +65,43 @@ pcibios_find_pci_bus(struct device_node *dn)
EXPORT_SYMBOL_GPL(pcibios_find_pci_bus);
/**
- * pcibios_remove_pci_devices - remove all devices under this bus
+ * __pcibios_remove_pci_devices - remove all devices under this bus
+ * @bus: the indicated PCI bus
+ * @purge_pe: destroy the PE on removal of PCI devices
*
* Remove all of the PCI devices under this bus both from the
* linux pci device tree, and from the powerpc EEH address cache.
+ * By default, the corresponding PE will be destroyed during the
+ * normal PCI hotplug path. For PCI hotplug during EEH recovery,
+ * the corresponding PE won't be destroyed or deallocated.
*/
-void pcibios_remove_pci_devices(struct pci_bus *bus)
+void __pcibios_remove_pci_devices(struct pci_bus *bus, int purge_pe)
{
- struct pci_dev *dev, *tmp;
+ struct pci_dev *dev, *tmp;
struct pci_bus *child_bus;
/* First go down child busses */
list_for_each_entry(child_bus, &bus->children, node)
- pcibios_remove_pci_devices(child_bus);
+ __pcibios_remove_pci_devices(child_bus, purge_pe);
pr_debug("PCI: Removing devices on bus %04x:%02x\n",
- pci_domain_nr(bus), bus->number);
+ pci_domain_nr(bus), bus->number);
list_for_each_entry_safe(dev, tmp, &bus->devices, bus_list) {
pr_debug(" * Removing %s...\n", pci_name(dev));
- eeh_remove_bus_device(dev);
- pci_stop_and_remove_bus_device(dev);
- }
+ eeh_remove_bus_device(dev, purge_pe);
+ pci_stop_and_remove_bus_device(dev);
+ }
+}
+
+/**
+ * pcibios_remove_pci_devices - remove all devices under this bus
+ * @bus: the indicated PCI bus
+ *
+ * Remove all of the PCI devices under this bus both from the
+ * linux pci device tree, and from the powerpc EEH address cache.
+ */
+void pcibios_remove_pci_devices(struct pci_bus *bus)
+{
+ __pcibios_remove_pci_devices(bus, 1);
}
EXPORT_SYMBOL_GPL(pcibios_remove_pci_devices);
diff --git a/arch/powerpc/platforms/pseries/setup.c b/arch/powerpc/platforms/pseries/setup.c
index 51ecac920dd8..e3cb7ae61658 100644
--- a/arch/powerpc/platforms/pseries/setup.c
+++ b/arch/powerpc/platforms/pseries/setup.c
@@ -388,10 +388,8 @@ static void __init pSeries_setup_arch(void)
/* Find and initialize PCI host bridges */
init_pci_config_tokens();
- eeh_pseries_init();
find_and_init_phbs();
pSeries_reconfig_notifier_register(&pci_dn_reconfig_nb);
- eeh_init();
pSeries_nvram_init();
@@ -416,16 +414,20 @@ static int __init pSeries_init_panel(void)
}
machine_arch_initcall(pseries, pSeries_init_panel);
-static int pseries_set_dabr(unsigned long dabr)
+static int pseries_set_dabr(unsigned long dabr, unsigned long dabrx)
{
return plpar_hcall_norets(H_SET_DABR, dabr);
}
-static int pseries_set_xdabr(unsigned long dabr)
+static int pseries_set_xdabr(unsigned long dabr, unsigned long dabrx)
{
- /* We want to catch accesses from kernel and userspace */
- return plpar_hcall_norets(H_SET_XDABR, dabr,
- H_DABRX_KERNEL | H_DABRX_USER);
+ /* Have to set at least one bit in the DABRX according to PAPR */
+ if (dabrx == 0 && dabr == 0)
+ dabrx = DABRX_USER;
+ /* PAPR says we can only set kernel and user bits */
+ dabrx &= DABRX_KERNEL | DABRX_USER;
+
+ return plpar_hcall_norets(H_SET_XDABR, dabr, dabrx);
}
#define CMO_CHARACTERISTICS_TOKEN 44
@@ -529,10 +531,10 @@ static void __init pSeries_init_early(void)
if (firmware_has_feature(FW_FEATURE_LPAR))
hvc_vio_init_early();
#endif
- if (firmware_has_feature(FW_FEATURE_DABR))
- ppc_md.set_dabr = pseries_set_dabr;
- else if (firmware_has_feature(FW_FEATURE_XDABR))
+ if (firmware_has_feature(FW_FEATURE_XDABR))
ppc_md.set_dabr = pseries_set_xdabr;
+ else if (firmware_has_feature(FW_FEATURE_DABR))
+ ppc_md.set_dabr = pseries_set_dabr;
pSeries_cmo_feature_init();
iommu_init_early_pSeries();