Diffstat (limited to 'arch/sparc64')
-rw-r--r--  arch/sparc64/Kconfig                 |   15
-rw-r--r--  arch/sparc64/kernel/central.c        |   12
-rw-r--r--  arch/sparc64/kernel/chmc.c           |    4
-rw-r--r--  arch/sparc64/kernel/ebus.c           |   14
-rw-r--r--  arch/sparc64/kernel/irq.c            |   28
-rw-r--r--  arch/sparc64/kernel/isa.c            |   36
-rw-r--r--  arch/sparc64/kernel/of_device.c      |   59
-rw-r--r--  arch/sparc64/kernel/pci.c            |  603
-rw-r--r--  arch/sparc64/kernel/pci_common.c     |  806
-rw-r--r--  arch/sparc64/kernel/pci_impl.h       |   33
-rw-r--r--  arch/sparc64/kernel/pci_iommu.c      |  104
-rw-r--r--  arch/sparc64/kernel/pci_psycho.c     |  154
-rw-r--r--  arch/sparc64/kernel/pci_sabre.c      |  465
-rw-r--r--  arch/sparc64/kernel/pci_schizo.c     |  295
-rw-r--r--  arch/sparc64/kernel/pci_sun4v.c      |  402
-rw-r--r--  arch/sparc64/kernel/process.c        |   15
-rw-r--r--  arch/sparc64/kernel/prom.c           |   39
-rw-r--r--  arch/sparc64/kernel/sbus.c           |  717
-rw-r--r--  arch/sparc64/kernel/smp.c            |  116
-rw-r--r--  arch/sparc64/kernel/sparc64_ksyms.c  |    1
-rw-r--r--  arch/sparc64/kernel/sys32.S          |    1
-rw-r--r--  arch/sparc64/kernel/sys_sunos32.c    |    4
-rw-r--r--  arch/sparc64/kernel/systbls.S        |    2
-rw-r--r--  arch/sparc64/kernel/time.c           |  484
-rw-r--r--  arch/sparc64/kernel/ttable.S         |    6
-rw-r--r--  arch/sparc64/mm/init.c               |  280
-rw-r--r--  arch/sparc64/solaris/misc.c          |    9
27 files changed, 1656 insertions(+), 3048 deletions(-)
diff --git a/arch/sparc64/Kconfig b/arch/sparc64/Kconfig
index 1a6348b565f..590a41b864b 100644
--- a/arch/sparc64/Kconfig
+++ b/arch/sparc64/Kconfig
@@ -19,6 +19,14 @@ config SPARC64
SPARC64 ports; its web page is available at
<http://www.ultralinux.org/>.
+config GENERIC_TIME
+ bool
+ default y
+
+config GENERIC_CLOCKEVENTS
+ bool
+ default y
+
config 64BIT
def_bool y
@@ -34,10 +42,6 @@ config LOCKDEP_SUPPORT
bool
default y
-config TIME_INTERPOLATION
- bool
- default y
-
config ARCH_MAY_HAVE_PC_FDC
bool
default y
@@ -113,6 +117,8 @@ config GENERIC_HARDIRQS
menu "General machine setup"
+source "kernel/time/Kconfig"
+
config SMP
bool "Symmetric multi-processing support"
---help---
@@ -214,6 +220,7 @@ config ARCH_SPARSEMEM_ENABLE
config ARCH_SPARSEMEM_DEFAULT
def_bool y
+ select SPARSEMEM_STATIC
config LARGE_ALLOCS
def_bool y
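For context, a minimal sketch of what selecting GENERIC_TIME asks an architecture to provide in place of the removed TIME_INTERPOLATION scheme: a clocksource whose read hook returns a free-running counter (on sparc64 that would be the %tick register, handled by the time.c rework listed in the diffstat). The names, rating and frequency below are illustrative only and are not taken from this patch.

#include <linux/clocksource.h>

/* Illustrative read hook; a real sparc64 version would read %tick. */
static cycle_t example_cs_read(void)
{
	return 0;
}

static struct clocksource example_cs = {
	.name	= "example",		/* hypothetical name */
	.rating	= 100,
	.read	= example_cs_read,
	.mask	= CLOCKSOURCE_MASK(64),
	.shift	= 16,
	.flags	= CLOCK_SOURCE_IS_CONTINUOUS,
};

static int __init example_time_init(void)
{
	/* Assumed 1MHz counter, purely for the sketch. */
	example_cs.mult = clocksource_hz2mult(1000000, example_cs.shift);
	return clocksource_register(&example_cs);
}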
diff --git a/arch/sparc64/kernel/central.c b/arch/sparc64/kernel/central.c
index e724c54af02..c65b2f9c98d 100644
--- a/arch/sparc64/kernel/central.c
+++ b/arch/sparc64/kernel/central.c
@@ -32,7 +32,7 @@ static void central_probe_failure(int line)
static void central_ranges_init(struct linux_central *central)
{
struct device_node *dp = central->prom_node;
- void *pval;
+ const void *pval;
int len;
central->num_central_ranges = 0;
@@ -47,7 +47,7 @@ static void central_ranges_init(struct linux_central *central)
static void fhc_ranges_init(struct linux_fhc *fhc)
{
struct device_node *dp = fhc->prom_node;
- void *pval;
+ const void *pval;
int len;
fhc->num_fhc_ranges = 0;
@@ -119,7 +119,7 @@ static unsigned long prom_reg_to_paddr(struct linux_prom_registers *r)
static void probe_other_fhcs(void)
{
struct device_node *dp;
- struct linux_prom64_registers *fpregs;
+ const struct linux_prom64_registers *fpregs;
for_each_node_by_name(dp, "fhc") {
struct linux_fhc *fhc;
@@ -190,7 +190,8 @@ static void probe_clock_board(struct linux_central *central,
struct device_node *fp)
{
struct device_node *dp;
- struct linux_prom_registers cregs[3], *pr;
+ struct linux_prom_registers cregs[3];
+ const struct linux_prom_registers *pr;
int nslots, tmp, nregs;
dp = fp->child;
@@ -299,7 +300,8 @@ static void init_all_fhc_hw(void)
void central_probe(void)
{
- struct linux_prom_registers fpregs[6], *pr;
+ struct linux_prom_registers fpregs[6];
+ const struct linux_prom_registers *pr;
struct linux_fhc *fhc;
struct device_node *dp, *fp;
int err;
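The central.c hunks (and the chmc.c, irq.c and of_device.c ones that follow) are part of the same const-ification sweep: of_get_property() hands back a pointer into firmware-owned property data, so callers keep a const pointer and copy out anything they intend to modify. A minimal sketch of that calling convention; the property name and buffer handling are illustrative, not from this patch:

static int example_read_ranges(struct device_node *dp, void *buf, int buflen)
{
	const void *pval;	/* read-only view of the OF property */
	int len;

	pval = of_get_property(dp, "ranges", &len);
	if (!pval || len > buflen)
		return -ENODEV;

	memcpy(buf, pval, len);	/* private, writable copy */
	return len;
}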
diff --git a/arch/sparc64/kernel/chmc.c b/arch/sparc64/kernel/chmc.c
index 9699abeb990..777d3457704 100644
--- a/arch/sparc64/kernel/chmc.c
+++ b/arch/sparc64/kernel/chmc.c
@@ -343,8 +343,8 @@ static int init_one_mctrl(struct device_node *dp)
{
struct mctrl_info *mp = kzalloc(sizeof(*mp), GFP_KERNEL);
int portid = of_getintprop_default(dp, "portid", -1);
- struct linux_prom64_registers *regs;
- void *pval;
+ const struct linux_prom64_registers *regs;
+ const void *pval;
int len;
if (!mp)
diff --git a/arch/sparc64/kernel/ebus.c b/arch/sparc64/kernel/ebus.c
index 35bf895fdee..0ace17bafba 100644
--- a/arch/sparc64/kernel/ebus.c
+++ b/arch/sparc64/kernel/ebus.c
@@ -285,7 +285,7 @@ static void __init fill_ebus_child(struct device_node *dp,
int non_standard_regs)
{
struct of_device *op;
- int *regs;
+ const int *regs;
int i, len;
dev->prom_node = dp;
@@ -438,11 +438,9 @@ static struct pci_dev *find_next_ebus(struct pci_dev *start, int *is_rio_p)
void __init ebus_init(void)
{
- struct pci_pbm_info *pbm;
struct linux_ebus_device *dev;
struct linux_ebus *ebus;
struct pci_dev *pdev;
- struct pcidev_cookie *cookie;
struct device_node *dp;
int is_rio;
int num_ebus = 0;
@@ -453,8 +451,7 @@ void __init ebus_init(void)
return;
}
- cookie = pdev->sysdata;
- dp = cookie->prom_node;
+ dp = pci_device_to_OF_node(pdev);
ebus_chain = ebus = ebus_alloc(sizeof(struct linux_ebus));
ebus->next = NULL;
@@ -480,8 +477,7 @@ void __init ebus_init(void)
break;
}
ebus->is_rio = is_rio;
- cookie = pdev->sysdata;
- dp = cookie->prom_node;
+ dp = pci_device_to_OF_node(pdev);
continue;
}
printk("ebus%d:", num_ebus);
@@ -489,7 +485,6 @@ void __init ebus_init(void)
ebus->index = num_ebus;
ebus->prom_node = dp;
ebus->self = pdev;
- ebus->parent = pbm = cookie->pbm;
ebus->ofdev.node = dp;
ebus->ofdev.dev.parent = &pdev->dev;
@@ -531,8 +526,7 @@ void __init ebus_init(void)
if (!pdev)
break;
- cookie = pdev->sysdata;
- dp = cookie->prom_node;
+ dp = pci_device_to_OF_node(pdev);
ebus->next = ebus_alloc(sizeof(struct linux_ebus));
ebus = ebus->next;
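The same substitution repeats in isa.c and pci.c below: with the pcidev_cookie gone, the OF node for a PCI device comes from the exported helper rather than from a sparc64-private sysdata pointer. A sketch of the new lookup pattern (illustrative call site, not from this patch):

static void example_show_of_path(struct pci_dev *pdev)
{
	struct device_node *dp = pci_device_to_OF_node(pdev);

	if (dp)
		printk(KERN_INFO "%s: OF node %s\n",
		       pci_name(pdev), dp->full_name);
}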
diff --git a/arch/sparc64/kernel/irq.c b/arch/sparc64/kernel/irq.c
index c443db18437..6241e3dbbd5 100644
--- a/arch/sparc64/kernel/irq.c
+++ b/arch/sparc64/kernel/irq.c
@@ -589,32 +589,6 @@ void ack_bad_irq(unsigned int virt_irq)
ino, virt_irq);
}
-#ifndef CONFIG_SMP
-extern irqreturn_t timer_interrupt(int, void *);
-
-void timer_irq(int irq, struct pt_regs *regs)
-{
- unsigned long clr_mask = 1 << irq;
- unsigned long tick_mask = tick_ops->softint_mask;
- struct pt_regs *old_regs;
-
- if (get_softint() & tick_mask) {
- irq = 0;
- clr_mask = tick_mask;
- }
- clear_softint(clr_mask);
-
- old_regs = set_irq_regs(regs);
- irq_enter();
-
- kstat_this_cpu.irqs[0]++;
- timer_interrupt(irq, NULL);
-
- irq_exit();
- set_irq_regs(old_regs);
-}
-#endif
-
void handler_irq(int irq, struct pt_regs *regs)
{
struct ino_bucket *bucket;
@@ -653,7 +627,7 @@ static u64 prom_limit0, prom_limit1;
static void map_prom_timers(void)
{
struct device_node *dp;
- unsigned int *addr;
+ const unsigned int *addr;
/* PROM timer node hangs out in the top level of device siblings... */
dp = of_find_node_by_path("/");
diff --git a/arch/sparc64/kernel/isa.c b/arch/sparc64/kernel/isa.c
index 98721a8f861..6a6882e57ff 100644
--- a/arch/sparc64/kernel/isa.c
+++ b/arch/sparc64/kernel/isa.c
@@ -24,27 +24,9 @@ static void __init report_dev(struct sparc_isa_device *isa_dev, int child)
static void __init isa_dev_get_resource(struct sparc_isa_device *isa_dev)
{
- struct linux_prom_registers *pregs;
- unsigned long base, len;
- int prop_len;
-
- pregs = of_get_property(isa_dev->prom_node, "reg", &prop_len);
- if (!pregs)
- return;
-
- /* Only the first one is interesting. */
- len = pregs[0].reg_size;
- base = (((unsigned long)pregs[0].which_io << 32) |
- (unsigned long)pregs[0].phys_addr);
- base += isa_dev->bus->parent->io_space.start;
-
- isa_dev->resource.start = base;
- isa_dev->resource.end = (base + len - 1UL);
- isa_dev->resource.flags = IORESOURCE_IO;
- isa_dev->resource.name = isa_dev->prom_node->name;
+ struct of_device *op = of_find_device_by_node(isa_dev->prom_node);
- request_resource(&isa_dev->bus->parent->io_space,
- &isa_dev->resource);
+ memcpy(&isa_dev->resource, &op->resource[0], sizeof(struct resource));
}
static void __init isa_dev_get_irq(struct sparc_isa_device *isa_dev)
@@ -158,19 +140,10 @@ void __init isa_init(void)
pdev = NULL;
while ((pdev = pci_get_device(vendor, device, pdev)) != NULL) {
- struct pcidev_cookie *pdev_cookie;
- struct pci_pbm_info *pbm;
struct sparc_isa_bridge *isa_br;
struct device_node *dp;
- pdev_cookie = pdev->sysdata;
- if (!pdev_cookie) {
- printk("ISA: Warning, ISA bridge ignored due to "
- "lack of OBP data.\n");
- continue;
- }
- pbm = pdev_cookie->pbm;
- dp = pdev_cookie->prom_node;
+ dp = pci_device_to_OF_node(pdev);
isa_br = kzalloc(sizeof(*isa_br), GFP_KERNEL);
if (!isa_br) {
@@ -195,10 +168,9 @@ void __init isa_init(void)
isa_br->next = isa_chain;
isa_chain = isa_br;
- isa_br->parent = pbm;
isa_br->self = pdev;
isa_br->index = index++;
- isa_br->prom_node = pdev_cookie->prom_node;
+ isa_br->prom_node = dp;
printk("isa%d:", isa_br->index);
diff --git a/arch/sparc64/kernel/of_device.c b/arch/sparc64/kernel/of_device.c
index fb9bf1e4d03..9ac9a307999 100644
--- a/arch/sparc64/kernel/of_device.c
+++ b/arch/sparc64/kernel/of_device.c
@@ -245,7 +245,7 @@ struct of_bus {
int *addrc, int *sizec);
int (*map)(u32 *addr, const u32 *range,
int na, int ns, int pna);
- unsigned int (*get_flags)(u32 *addr);
+ unsigned int (*get_flags)(const u32 *addr);
};
/*
@@ -305,7 +305,7 @@ static int of_bus_default_map(u32 *addr, const u32 *range,
return 0;
}
-static unsigned int of_bus_default_get_flags(u32 *addr)
+static unsigned int of_bus_default_get_flags(const u32 *addr)
{
return IORESOURCE_MEM;
}
@@ -317,6 +317,11 @@ static unsigned int of_bus_default_get_flags(u32 *addr)
static int of_bus_pci_match(struct device_node *np)
{
if (!strcmp(np->type, "pci") || !strcmp(np->type, "pciex")) {
+ const char *model = of_get_property(np, "model", NULL);
+
+ if (model && !strcmp(model, "SUNW,simba"))
+ return 0;
+
/* Do not do PCI specific frobbing if the
* PCI bridge lacks a ranges property. We
* want to pass it through up to the next
@@ -332,6 +337,21 @@ static int of_bus_pci_match(struct device_node *np)
return 0;
}
+static int of_bus_simba_match(struct device_node *np)
+{
+ const char *model = of_get_property(np, "model", NULL);
+
+ if (model && !strcmp(model, "SUNW,simba"))
+ return 1;
+ return 0;
+}
+
+static int of_bus_simba_map(u32 *addr, const u32 *range,
+ int na, int ns, int pna)
+{
+ return 0;
+}
+
static void of_bus_pci_count_cells(struct device_node *np,
int *addrc, int *sizec)
{
@@ -369,7 +389,7 @@ static int of_bus_pci_map(u32 *addr, const u32 *range,
return 0;
}
-static unsigned int of_bus_pci_get_flags(u32 *addr)
+static unsigned int of_bus_pci_get_flags(const u32 *addr)
{
unsigned int flags = 0;
u32 w = addr[0];
@@ -436,6 +456,15 @@ static struct of_bus of_busses[] = {
.map = of_bus_pci_map,
.get_flags = of_bus_pci_get_flags,
},
+ /* SIMBA */
+ {
+ .name = "simba",
+ .addr_prop_name = "assigned-addresses",
+ .match = of_bus_simba_match,
+ .count_cells = of_bus_pci_count_cells,
+ .map = of_bus_simba_map,
+ .get_flags = of_bus_pci_get_flags,
+ },
/* SBUS */
{
.name = "sbus",
@@ -482,7 +511,7 @@ static int __init build_one_resource(struct device_node *parent,
u32 *addr,
int na, int ns, int pna)
{
- u32 *ranges;
+ const u32 *ranges;
unsigned int rlen;
int rone;
@@ -513,7 +542,7 @@ static int __init build_one_resource(struct device_node *parent,
static int __init use_1to1_mapping(struct device_node *pp)
{
- char *model;
+ const char *model;
/* If this is on the PMU bus, don't try to translate it even
* if a ranges property exists.
@@ -548,7 +577,7 @@ static void __init build_device_resources(struct of_device *op,
struct of_bus *bus;
int na, ns;
int index, num_reg;
- void *preg;
+ const void *preg;
if (!parent)
return;
@@ -578,7 +607,7 @@ static void __init build_device_resources(struct of_device *op,
for (index = 0; index < num_reg; index++) {
struct resource *r = &op->resource[index];
u32 addr[OF_MAX_ADDR_CELLS];
- u32 *reg = (preg + (index * ((na + ns) * 4)));
+ const u32 *reg = (preg + (index * ((na + ns) * 4)));
struct device_node *dp = op->node;
struct device_node *pp = p_op->node;
struct of_bus *pbus, *dbus;
@@ -643,14 +672,14 @@ static void __init build_device_resources(struct of_device *op,
static struct device_node * __init
apply_interrupt_map(struct device_node *dp, struct device_node *pp,
- u32 *imap, int imlen, u32 *imask,
+ const u32 *imap, int imlen, const u32 *imask,
unsigned int *irq_p)
{
struct device_node *cp;
unsigned int irq = *irq_p;
struct of_bus *bus;
phandle handle;
- u32 *reg;
+ const u32 *reg;
int na, num_reg, i;
bus = of_match_bus(pp);
@@ -705,7 +734,7 @@ static unsigned int __init pci_irq_swizzle(struct device_node *dp,
struct device_node *pp,
unsigned int irq)
{
- struct linux_prom_pci_registers *regs;
+ const struct linux_prom_pci_registers *regs;
unsigned int bus, devfn, slot, ret;
if (irq < 1 || irq > 4)
@@ -730,12 +759,6 @@ static unsigned int __init pci_irq_swizzle(struct device_node *dp,
* D: 2-bit slot number, derived from PCI device number as
* (dev - 1) for bus A, or (dev - 2) for bus B
* L: 2-bit line number
- *
- * Actually, more "portable" way to calculate the funky
- * slot number is to subtract pbm->pci_first_slot from the
- * device number, and that's exactly what the pre-OF
- * sparc64 code did, but we're building this stuff generically
- * using the OBP tree, not in the PCI controller layer.
*/
if (bus & 0x80) {
/* PBM-A */
@@ -794,7 +817,7 @@ static unsigned int __init build_one_device_irq(struct of_device *op,
pp = dp->parent;
ip = NULL;
while (pp) {
- void *imap, *imsk;
+ const void *imap, *imsk;
int imlen;
imap = of_get_property(pp, "interrupt-map", &imlen);
@@ -859,7 +882,7 @@ static struct of_device * __init scan_one_device(struct device_node *dp,
struct device *parent)
{
struct of_device *op = kzalloc(sizeof(*op), GFP_KERNEL);
- unsigned int *irq;
+ const unsigned int *irq;
int len, i;
if (!op)
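The flag decoding used by of_bus_pci_get_flags() above, and by pci_parse_of_flags() added to pci.c below, follows the IEEE 1275 PCI binding layout of the phys.hi cell of "reg"/"assigned-addresses". A worked example with an assumed cell value, spelled out as a sketch:

/*
 * phys.hi layout: npt000ss bbbbbbbb dddddfff rrrrrrrr
 *   ss (bits 24-25): 01 = I/O, 10 = 32-bit MEM, 11 = 64-bit MEM
 *   p  (bit 30)    : prefetchable
 *   rrrrrrrr       : config register offset (the BAR)
 *
 * Assumed example value: 0x82000010
 *   ss  = 10   -> 32-bit memory space
 *   p   = 0    -> non-prefetchable
 *   reg = 0x10 -> PCI_BASE_ADDRESS_0
 */
u32 w = 0x82000010;
int space_code = (w >> 24) & 0x3;	/* 2: 32-bit MEM */
int bar_offset = w & 0xff;		/* 0x10 */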
diff --git a/arch/sparc64/kernel/pci.c b/arch/sparc64/kernel/pci.c
index 196b4b72482..023af41ad68 100644
--- a/arch/sparc64/kernel/pci.c
+++ b/arch/sparc64/kernel/pci.c
@@ -1,9 +1,11 @@
-/* $Id: pci.c,v 1.39 2002/01/05 01:13:43 davem Exp $
- * pci.c: UltraSparc PCI controller support.
+/* pci.c: UltraSparc PCI controller support.
*
* Copyright (C) 1997, 1998, 1999 David S. Miller (davem@redhat.com)
* Copyright (C) 1998, 1999 Eddie C. Dost (ecd@skynet.be)
* Copyright (C) 1999 Jakub Jelinek (jj@ultra.linux.cz)
+ *
+ * OF tree based PCI bus probing taken from the PowerPC port
+ * with minor modifications, see there for credits.
*/
#include <linux/module.h>
@@ -24,6 +26,9 @@
#include <asm/ebus.h>
#include <asm/isa.h>
#include <asm/prom.h>
+#include <asm/apb.h>
+
+#include "pci_impl.h"
unsigned long pci_memspace_mask = 0xffffffffUL;
@@ -277,10 +282,10 @@ int __init pcic_present(void)
return pci_controller_scan(pci_is_controller);
}
-struct pci_iommu_ops *pci_iommu_ops;
+const struct pci_iommu_ops *pci_iommu_ops;
EXPORT_SYMBOL(pci_iommu_ops);
-extern struct pci_iommu_ops pci_sun4u_iommu_ops,
+extern const struct pci_iommu_ops pci_sun4u_iommu_ops,
pci_sun4v_iommu_ops;
/* Find each controller in the system, attach and initialize
@@ -300,6 +305,467 @@ static void __init pci_controller_probe(void)
pci_controller_scan(pci_controller_init);
}
+static unsigned long pci_parse_of_flags(u32 addr0)
+{
+ unsigned long flags = 0;
+
+ if (addr0 & 0x02000000) {
+ flags = IORESOURCE_MEM | PCI_BASE_ADDRESS_SPACE_MEMORY;
+ flags |= (addr0 >> 22) & PCI_BASE_ADDRESS_MEM_TYPE_64;
+ flags |= (addr0 >> 28) & PCI_BASE_ADDRESS_MEM_TYPE_1M;
+ if (addr0 & 0x40000000)
+ flags |= IORESOURCE_PREFETCH
+ | PCI_BASE_ADDRESS_MEM_PREFETCH;
+ } else if (addr0 & 0x01000000)
+ flags = IORESOURCE_IO | PCI_BASE_ADDRESS_SPACE_IO;
+ return flags;
+}
+
+/* The of_device layer has translated all of the assigned-address properties
+ * into physical address resources, we only have to figure out the register
+ * mapping.
+ */
+static void pci_parse_of_addrs(struct of_device *op,
+ struct device_node *node,
+ struct pci_dev *dev)
+{
+ struct resource *op_res;
+ const u32 *addrs;
+ int proplen;
+
+ addrs = of_get_property(node, "assigned-addresses", &proplen);
+ if (!addrs)
+ return;
+ printk(" parse addresses (%d bytes) @ %p\n", proplen, addrs);
+ op_res = &op->resource[0];
+ for (; proplen >= 20; proplen -= 20, addrs += 5, op_res++) {
+ struct resource *res;
+ unsigned long flags;
+ int i;
+
+ flags = pci_parse_of_flags(addrs[0]);
+ if (!flags)
+ continue;
+ i = addrs[0] & 0xff;
+ printk(" start: %lx, end: %lx, i: %x\n",
+ op_res->start, op_res->end, i);
+
+ if (PCI_BASE_ADDRESS_0 <= i && i <= PCI_BASE_ADDRESS_5) {
+ res = &dev->resource[(i - PCI_BASE_ADDRESS_0) >> 2];
+ } else if (i == dev->rom_base_reg) {
+ res = &dev->resource[PCI_ROM_RESOURCE];
+ flags |= IORESOURCE_READONLY | IORESOURCE_CACHEABLE;
+ } else {
+ printk(KERN_ERR "PCI: bad cfg reg num 0x%x\n", i);
+ continue;
+ }
+ res->start = op_res->start;
+ res->end = op_res->end;
+ res->flags = flags;
+ res->name = pci_name(dev);
+ }
+}
+
+struct pci_dev *of_create_pci_dev(struct pci_pbm_info *pbm,
+ struct device_node *node,
+ struct pci_bus *bus, int devfn,
+ int host_controller)
+{
+ struct dev_archdata *sd;
+ struct pci_dev *dev;
+ const char *type;
+ u32 class;
+
+ dev = kzalloc(sizeof(struct pci_dev), GFP_KERNEL);
+ if (!dev)
+ return NULL;
+
+ sd = &dev->dev.archdata;
+ sd->iommu = pbm->iommu;
+ sd->stc = &pbm->stc;
+ sd->host_controller = pbm;
+ sd->prom_node = node;
+ sd->op = of_find_device_by_node(node);
+ sd->msi_num = 0xffffffff;
+
+ type = of_get_property(node, "device_type", NULL);
+ if (type == NULL)
+ type = "";
+
+ printk(" create device, devfn: %x, type: %s hostcontroller(%d)\n",
+ devfn, type, host_controller);
+
+ dev->bus = bus;
+ dev->sysdata = node;
+ dev->dev.parent = bus->bridge;
+ dev->dev.bus = &pci_bus_type;
+ dev->devfn = devfn;
+ dev->multifunction = 0; /* maybe a lie? */
+
+ if (host_controller) {
+ dev->vendor = 0x108e;
+ dev->device = 0x8000;
+ dev->subsystem_vendor = 0x0000;
+ dev->subsystem_device = 0x0000;
+ dev->cfg_size = 256;
+ dev->class = PCI_CLASS_BRIDGE_HOST << 8;
+ sprintf(pci_name(dev), "%04x:%02x:%02x.%d", pci_domain_nr(bus),
+ 0x00, PCI_SLOT(devfn), PCI_FUNC(devfn));
+ } else {
+ dev->vendor = of_getintprop_default(node, "vendor-id", 0xffff);
+ dev->device = of_getintprop_default(node, "device-id", 0xffff);
+ dev->subsystem_vendor =
+ of_getintprop_default(node, "subsystem-vendor-id", 0);
+ dev->subsystem_device =
+ of_getintprop_default(node, "subsystem-id", 0);
+
+ dev->cfg_size = pci_cfg_space_size(dev);
+
+ /* We can't actually use the firmware value, we have
+ * to read what is in the register right now. One
+ * reason is that in the case of IDE interfaces the
+ * firmware can sample the value before the IDE
+ * interface is programmed into native mode.
+ */
+ pci_read_config_dword(dev, PCI_CLASS_REVISION, &class);
+ dev->class = class >> 8;
+
+ sprintf(pci_name(dev), "%04x:%02x:%02x.%d", pci_domain_nr(bus),
+ dev->bus->number, PCI_SLOT(devfn), PCI_FUNC(devfn));
+ }
+ printk(" class: 0x%x device name: %s\n",
+ dev->class, pci_name(dev));
+
+ dev->current_state = 4; /* unknown power state */
+ dev->error_state = pci_channel_io_normal;
+
+ if (host_controller) {
+ dev->hdr_type = PCI_HEADER_TYPE_BRIDGE;
+ dev->rom_base_reg = PCI_ROM_ADDRESS1;
+ dev->irq = PCI_IRQ_NONE;
+ } else {
+ if (!strcmp(type, "pci") || !strcmp(type, "pciex")) {
+ /* a PCI-PCI bridge */
+ dev->hdr_type = PCI_HEADER_TYPE_BRIDGE;
+ dev->rom_base_reg = PCI_ROM_ADDRESS1;
+ } else if (!strcmp(type, "cardbus")) {
+ dev->hdr_type = PCI_HEADER_TYPE_CARDBUS;
+ } else {
+ dev->hdr_type = PCI_HEADER_TYPE_NORMAL;
+ dev->rom_base_reg = PCI_ROM_ADDRESS;
+
+ dev->irq = sd->op->irqs[0];
+ if (dev->irq == 0xffffffff)
+ dev->irq = PCI_IRQ_NONE;
+ }
+ }
+ pci_parse_of_addrs(sd->op, node, dev);
+
+ printk(" adding to system ...\n");
+
+ pci_device_add(dev, bus);
+
+ return dev;
+}
+
+static void __init apb_calc_first_last(u8 map, u32 *first_p, u32 *last_p)
+{
+ u32 idx, first, last;
+
+ first = 8;
+ last = 0;
+ for (idx = 0; idx < 8; idx++) {
+ if ((map & (1 << idx)) != 0) {
+ if (first > idx)
+ first = idx;
+ if (last < idx)
+ last = idx;
+ }
+ }
+
+ *first_p = first;
+ *last_p = last;
+}
+
+static void __init pci_resource_adjust(struct resource *res,
+ struct resource *root)
+{
+ res->start += root->start;
+ res->end += root->start;
+}
+
+/* Cook up fake bus resources for SUNW,simba PCI bridges which lack
+ * a proper 'ranges' property.
+ */
+static void __init apb_fake_ranges(struct pci_dev *dev,
+ struct pci_bus *bus,
+ struct pci_pbm_info *pbm)
+{
+ struct resource *res;
+ u32 first, last;
+ u8 map;
+
+ pci_read_config_byte(dev, APB_IO_ADDRESS_MAP, &map);
+ apb_calc_first_last(map, &first, &last);
+ res = bus->resource[0];
+ res->start = (first << 21);
+ res->end = (last << 21) + ((1 << 21) - 1);
+ res->flags = IORESOURCE_IO;
+ pci_resource_adjust(res, &pbm->io_space);
+
+ pci_read_config_byte(dev, APB_MEM_ADDRESS_MAP, &map);
+ apb_calc_first_last(map, &first, &last);
+ res = bus->resource[1];
+ res->start = (first << 21);
+ res->end = (last << 21) + ((1 << 21) - 1);
+ res->flags = IORESOURCE_MEM;
+ pci_resource_adjust(res, &pbm->mem_space);
+}
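Taken together, apb_calc_first_last() and apb_fake_ranges() turn one APB address-map byte into a bus window in 2MB (1 << 21) units. A worked example under an assumed register value:

/*
 * Assume APB_MEM_ADDRESS_MAP reads back as 0x3c (bits 2..5 set):
 *
 *   apb_calc_first_last(0x3c, &first, &last) -> first = 2, last = 5
 *
 *   res->start = first << 21             = 0x00400000
 *   res->end   = (last << 21) + 0x1fffff = 0x00bfffff
 *
 * i.e. an 8MB window, which pci_resource_adjust() then offsets into
 * the PBM's mem_space.
 */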
+
+static void __init pci_of_scan_bus(struct pci_pbm_info *pbm,
+ struct device_node *node,
+ struct pci_bus *bus);
+
+#define GET_64BIT(prop, i) ((((u64) (prop)[(i)]) << 32) | (prop)[(i)+1])
+
+void __devinit of_scan_pci_bridge(struct pci_pbm_info *pbm,
+ struct device_node *node,
+ struct pci_dev *dev)
+{
+ struct pci_bus *bus;
+ const u32 *busrange, *ranges;
+ int len, i, simba;
+ struct resource *res;
+ unsigned int flags;
+ u64 size;
+
+ printk("of_scan_pci_bridge(%s)\n", node->full_name);
+
+ /* parse bus-range property */
+ busrange = of_get_property(node, "bus-range", &len);
+ if (busrange == NULL || len != 8) {
+ printk(KERN_DEBUG "Can't get bus-range for PCI-PCI bridge %s\n",
+ node->full_name);
+ return;
+ }
+ ranges = of_get_property(node, "ranges", &len);
+ simba = 0;
+ if (ranges == NULL) {
+ const char *model = of_get_property(node, "model", NULL);
+ if (model && !strcmp(model, "SUNW,simba")) {
+ simba = 1;
+ } else {
+ printk(KERN_DEBUG "Can't get ranges for PCI-PCI bridge %s\n",
+ node->full_name);
+ return;
+ }
+ }
+
+ bus = pci_add_new_bus(dev->bus, dev, busrange[0]);
+ if (!bus) {
+ printk(KERN_ERR "Failed to create pci bus for %s\n",
+ node->full_name);
+ return;
+ }
+
+ bus->primary = dev->bus->number;
+ bus->subordinate = busrange[1];
+ bus->bridge_ctl = 0;
+
+ /* parse ranges property, or cook one up by hand for Simba */
+ /* PCI #address-cells == 3 and #size-cells == 2 always */
+ res = &dev->resource[PCI_BRIDGE_RESOURCES];
+ for (i = 0; i < PCI_NUM_RESOURCES - PCI_BRIDGE_RESOURCES; ++i) {
+ res->flags = 0;
+ bus->resource[i] = res;
+ ++res;
+ }
+ if (simba) {
+ apb_fake_ranges(dev, bus, pbm);
+ goto simba_cont;
+ }
+ i = 1;
+ for (; len >= 32; len -= 32, ranges += 8) {
+ struct resource *root;
+
+ flags = pci_parse_of_flags(ranges[0]);
+ size = GET_64BIT(ranges, 6);
+ if (flags == 0 || size == 0)
+ continue;
+ if (flags & IORESOURCE_IO) {
+ res = bus->resource[0];
+ if (res->flags) {
+ printk(KERN_ERR "PCI: ignoring extra I/O range"
+ " for bridge %s\n", node->full_name);
+ continue;
+ }
+ root = &pbm->io_space;
+ } else {
+ if (i >= PCI_NUM_RESOURCES - PCI_BRIDGE_RESOURCES) {
+ printk(KERN_ERR "PCI: too many memory ranges"
+ " for bridge %s\n", node->full_name);
+ continue;
+ }
+ res = bus->resource[i];
+ ++i;
+ root = &pbm->mem_space;
+ }
+
+ res->start = GET_64BIT(ranges, 1);
+ res->end = res->start + size - 1;
+ res->flags = flags;
+
+ /* Another way to implement this would be to add an of_device
+ * layer routine that can calculate a resource for a given
+ * range property value in a PCI device.
+ */
+ pci_resource_adjust(res, root);
+ }
+simba_cont:
+ sprintf(bus->name, "PCI Bus %04x:%02x", pci_domain_nr(bus),
+ bus->number);
+ printk(" bus name: %s\n", bus->name);
+
+ pci_of_scan_bus(pbm, node, bus);
+}
+
+static void __init pci_of_scan_bus(struct pci_pbm_info *pbm,
+ struct device_node *node,
+ struct pci_bus *bus)
+{
+ struct device_node *child;
+ const u32 *reg;
+ int reglen, devfn;
+ struct pci_dev *dev;
+
+ printk("PCI: scan_bus[%s] bus no %d\n",
+ node->full_name, bus->number);
+
+ child = NULL;
+ while ((child = of_get_next_child(node, child)) != NULL) {
+ printk(" * %s\n", child->full_name);
+ reg = of_get_property(child, "reg", &reglen);
+ if (reg == NULL || reglen < 20)
+ continue;
+ devfn = (reg[0] >> 8) & 0xff;
+
+ /* create a new pci_dev for this device */
+ dev = of_create_pci_dev(pbm, child, bus, devfn, 0);
+ if (!dev)
+ continue;
+ printk("PCI: dev header type: %x\n", dev->hdr_type);
+
+ if (dev->hdr_type == PCI_HEADER_TYPE_BRIDGE ||
+ dev->hdr_type == PCI_HEADER_TYPE_CARDBUS)
+ of_scan_pci_bridge(pbm, child, dev);
+ }
+}
+
+static ssize_t
+show_pciobppath_attr(struct device * dev, struct device_attribute * attr, char * buf)
+{
+ struct pci_dev *pdev;
+ struct device_node *dp;
+
+ pdev = to_pci_dev(dev);
+ dp = pdev->dev.archdata.prom_node;
+
+ return snprintf (buf, PAGE_SIZE, "%s\n", dp->full_name);
+}
+
+static DEVICE_ATTR(obppath, S_IRUSR | S_IRGRP | S_IROTH, show_pciobppath_attr, NULL);
+
+static void __devinit pci_bus_register_of_sysfs(struct pci_bus *bus)
+{
+ struct pci_dev *dev;
+ struct pci_bus *child_bus;
+ int err;
+
+ list_for_each_entry(dev, &bus->devices, bus_list) {
+ /* we don't really care if we can create this file or
+ * not, but we need to assign the result of the call
+ * or the world will fall under alien invasion and
+ * everybody will be frozen on a spaceship ready to be
+ * eaten on alpha centauri by some green and jelly
+ * humanoid.
+ */
+ err = sysfs_create_file(&dev->dev.kobj, &dev_attr_obppath.attr);
+ }
+ list_for_each_entry(child_bus, &bus->children, node)
+ pci_bus_register_of_sysfs(child_bus);
+}
+
+int pci_host_bridge_read_pci_cfg(struct pci_bus *bus_dev,
+ unsigned int devfn,
+ int where, int size,
+ u32 *value)
+{
+ static u8 fake_pci_config[] = {
+ 0x8e, 0x10, /* Vendor: 0x108e (Sun) */
+ 0x00, 0x80, /* Device: 0x8000 (PBM) */
+ 0x46, 0x01, /* Command: 0x0146 (SERR, PARITY, MASTER, MEM) */
+ 0xa0, 0x22, /* Status: 0x02a0 (DEVSEL_MED, FB2B, 66MHZ) */
+ 0x00, 0x00, 0x00, 0x06, /* Class: 0x06000000 host bridge */
+ 0x00, /* Cacheline: 0x00 */
+ 0x40, /* Latency: 0x40 */
+ 0x00, /* Header-Type: 0x00 normal */
+ };
+
+ *value = 0;
+ if (where >= 0 && where < sizeof(fake_pci_config) &&
+ (where + size) >= 0 &&
+ (where + size) < sizeof(fake_pci_config) &&
+ size <= sizeof(u32)) {
+ while (size--) {
+ *value <<= 8;
+ *value |= fake_pci_config[where + size];
+ }
+ }
+
+ return PCIBIOS_SUCCESSFUL;
+}
+
+int pci_host_bridge_write_pci_cfg(struct pci_bus *bus_dev,
+ unsigned int devfn,
+ int where, int size,
+ u32 value)
+{
+ return PCIBIOS_SUCCESSFUL;
+}
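A usage sketch of the fake host-bridge config space above: a 16-bit read at offset 0 assembles the table bytes little-endian into the Sun vendor ID. The values follow directly from fake_pci_config[]; the bus pointer stands for any bus under the PBM and is illustrative:

/* Worked example: 16-bit read of PCI_VENDOR_ID (where = 0, size = 2). */
u32 val;

pci_host_bridge_read_pci_cfg(bus, 0 /* devfn */, PCI_VENDOR_ID, 2, &val);
/* The byte loop combines fake_pci_config[1] and fake_pci_config[0]:
 * val = (0x10 << 8) | 0x8e = 0x108e, the Sun vendor ID.
 */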
+
+struct pci_bus * __init pci_scan_one_pbm(struct pci_pbm_info *pbm)
+{
+ struct pci_controller_info *p = pbm->parent;
+ struct device_node *node = pbm->prom_node;
+ struct pci_dev *host_pdev;
+ struct pci_bus *bus;
+
+ printk("PCI: Scanning PBM %s\n", node->full_name);
+
+ /* XXX parent device? XXX */
+ bus = pci_create_bus(NULL, pbm->pci_first_busno, p->pci_ops, pbm);
+ if (!bus) {
+ printk(KERN_ERR "Failed to create bus for %s\n",
+ node->full_name);
+ return NULL;
+ }
+ bus->secondary = pbm->pci_first_busno;
+ bus->subordinate = pbm->pci_last_busno;
+
+ bus->resource[0] = &pbm->io_space;
+ bus->resource[1] = &pbm->mem_space;
+
+ /* Create the dummy host bridge and link it in. */
+ host_pdev = of_create_pci_dev(pbm, node, bus, 0x00, 1);
+ bus->self = host_pdev;
+
+ pci_of_scan_bus(pbm, node, bus);
+ pci_bus_add_devices(bus);
+ pci_bus_register_of_sysfs(bus);
+
+ return bus;
+}
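The controller back-ends (psycho, sabre, schizo, sun4v, all reworked later in this series per the diffstat) are expected to hand each PBM to this single entry point instead of running their own scan, cookie-fillin and fixup passes. A hypothetical call site, with names not taken from this patch:

static void __init example_controller_scan(struct pci_controller_info *p)
{
	struct pci_bus *bus;

	bus = pci_scan_one_pbm(&p->pbm_A);
	if (!bus)
		return;

	/* Remember the root bus; resource and IRQ setup already happened
	 * inside pci_scan_one_pbm() from the OF device tree.
	 */
	p->pbm_A.pci_bus = bus;
}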
+
static void __init pci_scan_each_controller_bus(void)
{
struct pci_controller_info *p;
@@ -327,7 +793,7 @@ static int __init pcibios_init(void)
subsys_initcall(pcibios_init);
-void pcibios_fixup_bus(struct pci_bus *pbus)
+void __devinit pcibios_fixup_bus(struct pci_bus *pbus)
{
struct pci_pbm_info *pbm = pbus->sysdata;
@@ -360,8 +826,33 @@ void pcibios_align_resource(void *data, struct resource *res,
{
}
-int pcibios_enable_device(struct pci_dev *pdev, int mask)
+int pcibios_enable_device(struct pci_dev *dev, int mask)
{
+ u16 cmd, oldcmd;
+ int i;
+
+ pci_read_config_word(dev, PCI_COMMAND, &cmd);
+ oldcmd = cmd;
+
+ for (i = 0; i < PCI_NUM_RESOURCES; i++) {
+ struct resource *res = &dev->resource[i];
+
+ /* Only set up the requested stuff */
+ if (!(mask & (1<<i)))
+ continue;
+
+ if (res->flags & IORESOURCE_IO)
+ cmd |= PCI_COMMAND_IO;
+ if (res->flags & IORESOURCE_MEM)
+ cmd |= PCI_COMMAND_MEMORY;
+ }
+
+ if (cmd != oldcmd) {
+ printk(KERN_DEBUG "PCI: Enabling device: (%s), cmd %x\n",
+ pci_name(dev), cmd);
+ /* Enable the appropriate bits in the PCI command register. */
+ pci_write_config_word(dev, PCI_COMMAND, cmd);
+ }
return 0;
}
@@ -380,7 +871,7 @@ void pcibios_resource_to_bus(struct pci_dev *pdev, struct pci_bus_region *region
else
root = &pbm->mem_space;
- pbm->parent->resource_adjust(pdev, &zero_res, root);
+ pci_resource_adjust(&zero_res, root);
region->start = res->start - zero_res.start;
region->end = res->end - zero_res.start;
@@ -401,11 +892,11 @@ void pcibios_bus_to_resource(struct pci_dev *pdev, struct resource *res,
else
root = &pbm->mem_space;
- pbm->parent->resource_adjust(pdev, res, root);
+ pci_resource_adjust(res, root);
}
EXPORT_SYMBOL(pcibios_bus_to_resource);
-char * __init pcibios_setup(char *str)
+char * __devinit pcibios_setup(char *str)
{
return str;
}
@@ -422,55 +913,17 @@ char * __init pcibios_setup(char *str)
static int __pci_mmap_make_offset_bus(struct pci_dev *pdev, struct vm_area_struct *vma,
enum pci_mmap_state mmap_state)
{
- struct pcidev_cookie *pcp = pdev->sysdata;
- struct pci_pbm_info *pbm;
+ struct pci_pbm_info *pbm = pdev->dev.archdata.host_controller;
struct pci_controller_info *p;
unsigned long space_size, user_offset, user_size;
- if (!pcp)
- return -ENXIO;
- pbm = pcp->pbm;
- if (!pbm)
- return -ENXIO;
-
p = pbm->parent;
- if (p->pbms_same_domain) {
- unsigned long lowest, highest;
-
- lowest = ~0UL; highest = 0UL;
- if (mmap_state == pci_mmap_io) {
- if (p->pbm_A.io_space.flags) {
- lowest = p->pbm_A.io_space.start;
- highest = p->pbm_A.io_space.end + 1;
- }
- if (p->pbm_B.io_space.flags) {
- if (lowest > p->pbm_B.io_space.start)
- lowest = p->pbm_B.io_space.start;
- if (highest < p->pbm_B.io_space.end + 1)
- highest = p->pbm_B.io_space.end + 1;
- }
- space_size = highest - lowest;
- } else {
- if (p->pbm_A.mem_space.flags) {
- lowest = p->pbm_A.mem_space.start;
- highest = p->pbm_A.mem_space.end + 1;
- }
- if (p->pbm_B.mem_space.flags) {
- if (lowest > p->pbm_B.mem_space.start)
- lowest = p->pbm_B.mem_space.start;
- if (highest < p->pbm_B.mem_space.end + 1)
- highest = p->pbm_B.mem_space.end + 1;
- }
- space_size = highest - lowest;
- }
+ if (mmap_state == pci_mmap_io) {
+ space_size = (pbm->io_space.end -
+ pbm->io_space.start) + 1;
} else {
- if (mmap_state == pci_mmap_io) {
- space_size = (pbm->io_space.end -
- pbm->io_space.start) + 1;
- } else {
- space_size = (pbm->mem_space.end -
- pbm->mem_space.start) + 1;
- }
+ space_size = (pbm->mem_space.end -
+ pbm->mem_space.start) + 1;
}
/* Make sure the request is in range. */
@@ -481,31 +934,12 @@ static int __pci_mmap_make_offset_bus(struct pci_dev *pdev, struct vm_area_struc
(user_offset + user_size) > space_size)
return -EINVAL;
- if (p->pbms_same_domain) {
- unsigned long lowest = ~0UL;
-
- if (mmap_state == pci_mmap_io) {
- if (p->pbm_A.io_space.flags)
- lowest = p->pbm_A.io_space.start;
- if (p->pbm_B.io_space.flags &&
- lowest > p->pbm_B.io_space.start)
- lowest = p->pbm_B.io_space.start;
- } else {
- if (p->pbm_A.mem_space.flags)
- lowest = p->pbm_A.mem_space.start;
- if (p->pbm_B.mem_space.flags &&
- lowest > p->pbm_B.mem_space.start)
- lowest = p->pbm_B.mem_space.start;
- }
- vma->vm_pgoff = (lowest + user_offset) >> PAGE_SHIFT;
+ if (mmap_state == pci_mmap_io) {
+ vma->vm_pgoff = (pbm->io_space.start +
+ user_offset) >> PAGE_SHIFT;
} else {
- if (mmap_state == pci_mmap_io) {
- vma->vm_pgoff = (pbm->io_space.start +
- user_offset) >> PAGE_SHIFT;
- } else {
- vma->vm_pgoff = (pbm->mem_space.start +
- user_offset) >> PAGE_SHIFT;
- }
+ vma->vm_pgoff = (pbm->mem_space.start +
+ user_offset) >> PAGE_SHIFT;
}
return 0;
@@ -639,9 +1073,8 @@ int pci_domain_nr(struct pci_bus *pbus)
struct pci_controller_info *p = pbm->parent;
ret = p->index;
- if (p->pbms_same_domain == 0)
- ret = ((ret << 1) +
- ((pbm == &pbm->parent->pbm_B) ? 1 : 0));
+ ret = ((ret << 1) +
+ ((pbm == &pbm->parent->pbm_B) ? 1 : 0));
}
return ret;
@@ -651,8 +1084,7 @@ EXPORT_SYMBOL(pci_domain_nr);
#ifdef CONFIG_PCI_MSI
int arch_setup_msi_irq(struct pci_dev *pdev, struct msi_desc *desc)
{
- struct pcidev_cookie *pcp = pdev->sysdata;
- struct pci_pbm_info *pbm = pcp->pbm;
+ struct pci_pbm_info *pbm = pdev->dev.archdata.host_controller;
struct pci_controller_info *p = pbm->parent;
int virt_irq, err;
@@ -670,8 +1102,7 @@ void arch_teardown_msi_irq(unsigned int virt_irq)
{
struct msi_desc *entry = get_irq_msi(virt_irq);
struct pci_dev *pdev = entry->dev;
- struct pcidev_cookie *pcp = pdev->sysdata;
- struct pci_pbm_info *pbm = pcp->pbm;
+ struct pci_pbm_info *pbm = pdev->dev.archdata.host_controller;
struct pci_controller_info *p = pbm->parent;
if (!pbm->msi_num || !p->setup_msi_irq)
@@ -683,9 +1114,7 @@ void arch_teardown_msi_irq(unsigned int virt_irq)
struct device_node *pci_device_to_OF_node(struct pci_dev *pdev)
{
- struct pcidev_cookie *pc = pdev->sysdata;
-
- return pc->op->node;
+ return pdev->dev.archdata.prom_node;
}
EXPORT_SYMBOL(pci_device_to_OF_node);
diff --git a/arch/sparc64/kernel/pci_common.c b/arch/sparc64/kernel/pci_common.c
index 5a92cb90ebe..1e6aeedf43c 100644
--- a/arch/sparc64/kernel/pci_common.c
+++ b/arch/sparc64/kernel/pci_common.c
@@ -1,7 +1,6 @@
-/* $Id: pci_common.c,v 1.29 2002/02/01 00:56:03 davem Exp $
- * pci_common.c: PCI controller common support.
+/* pci_common.c: PCI controller common support.
*
- * Copyright (C) 1999 David S. Miller (davem@redhat.com)
+ * Copyright (C) 1999, 2007 David S. Miller (davem@davemloft.net)
*/
#include <linux/string.h>
@@ -16,748 +15,137 @@
#include "pci_impl.h"
-/* Fix self device of BUS and hook it into BUS->self.
- * The pci_scan_bus does not do this for the host bridge.
- */
-void __init pci_fixup_host_bridge_self(struct pci_bus *pbus)
-{
- struct pci_dev *pdev;
-
- list_for_each_entry(pdev, &pbus->devices, bus_list) {
- if (pdev->class >> 8 == PCI_CLASS_BRIDGE_HOST) {
- pbus->self = pdev;
- return;
- }
- }
-
- prom_printf("PCI: Critical error, cannot find host bridge PDEV.\n");
- prom_halt();
-}
-
-/* Find the OBP PROM device tree node for a PCI device. */
-static struct device_node * __init
-find_device_prom_node(struct pci_pbm_info *pbm, struct pci_dev *pdev,
- struct device_node *bus_node,
- struct linux_prom_pci_registers **pregs,
- int *nregs)
+static void pci_register_legacy_regions(struct resource *io_res,
+ struct resource *mem_res)
{
- struct device_node *dp;
-
- *nregs = 0;
-
- /*
- * Return the PBM's PROM node in case we are it's PCI device,
- * as the PBM's reg property is different to standard PCI reg
- * properties. We would delete this device entry otherwise,
- * which confuses XFree86's device probing...
- */
- if ((pdev->bus->number == pbm->pci_bus->number) && (pdev->devfn == 0) &&
- (pdev->vendor == PCI_VENDOR_ID_SUN) &&
- (pdev->device == PCI_DEVICE_ID_SUN_PBM ||
- pdev->device == PCI_DEVICE_ID_SUN_SCHIZO ||
- pdev->device == PCI_DEVICE_ID_SUN_TOMATILLO ||
- pdev->device == PCI_DEVICE_ID_SUN_SABRE ||
- pdev->device == PCI_DEVICE_ID_SUN_HUMMINGBIRD))
- return bus_node;
-
- dp = bus_node->child;
- while (dp) {
- struct linux_prom_pci_registers *regs;
- struct property *prop;
- int len;
-
- prop = of_find_property(dp, "reg", &len);
- if (!prop)
- goto do_next_sibling;
-
- regs = prop->value;
- if (((regs[0].phys_hi >> 8) & 0xff) == pdev->devfn) {
- *pregs = regs;
- *nregs = len / sizeof(struct linux_prom_pci_registers);
- return dp;
- }
-
- do_next_sibling:
- dp = dp->sibling;
- }
-
- return NULL;
-}
+ struct resource *p;
-/* Older versions of OBP on PCI systems encode 64-bit MEM
- * space assignments incorrectly, this fixes them up. We also
- * take the opportunity here to hide other kinds of bogus
- * assignments.
- */
-static void __init fixup_obp_assignments(struct pci_dev *pdev,
- struct pcidev_cookie *pcp)
-{
- int i;
-
- if (pdev->vendor == PCI_VENDOR_ID_AL &&
- (pdev->device == PCI_DEVICE_ID_AL_M7101 ||
- pdev->device == PCI_DEVICE_ID_AL_M1533)) {
- int i;
-
- /* Zap all of the normal resources, they are
- * meaningless and generate bogus resource collision
- * messages. This is OpenBoot's ill-fated attempt to
- * represent the implicit resources that these devices
- * have.
- */
- pcp->num_prom_assignments = 0;
- for (i = 0; i < 6; i++) {
- pdev->resource[i].start =
- pdev->resource[i].end =
- pdev->resource[i].flags = 0;
- }
- pdev->resource[PCI_ROM_RESOURCE].start =
- pdev->resource[PCI_ROM_RESOURCE].end =
- pdev->resource[PCI_ROM_RESOURCE].flags = 0;
+ /* VGA Video RAM. */
+ p = kzalloc(sizeof(*p), GFP_KERNEL);
+ if (!p)
return;
- }
-
- for (i = 0; i < pcp->num_prom_assignments; i++) {
- struct linux_prom_pci_registers *ap;
- int space;
- ap = &pcp->prom_assignments[i];
- space = ap->phys_hi >> 24;
- if ((space & 0x3) == 2 &&
- (space & 0x4) != 0) {
- ap->phys_hi &= ~(0x7 << 24);
- ap->phys_hi |= 0x3 << 24;
- }
- }
-}
-
-static ssize_t
-show_pciobppath_attr(struct device * dev, struct device_attribute * attr, char * buf)
-{
- struct pci_dev *pdev;
- struct pcidev_cookie *sysdata;
-
- pdev = to_pci_dev(dev);
- sysdata = pdev->sysdata;
-
- return snprintf (buf, PAGE_SIZE, "%s\n", sysdata->prom_node->full_name);
-}
-
-static DEVICE_ATTR(obppath, S_IRUSR | S_IRGRP | S_IROTH, show_pciobppath_attr, NULL);
+ p->name = "Video RAM area";
+ p->start = mem_res->start + 0xa0000UL;
+ p->end = p->start + 0x1ffffUL;
+ p->flags = IORESOURCE_BUSY;
+ request_resource(mem_res, p);
-/* Fill in the PCI device cookie sysdata for the given
- * PCI device. This cookie is the means by which one
- * can get to OBP and PCI controller specific information
- * for a PCI device.
- */
-static void __init pdev_cookie_fillin(struct pci_pbm_info *pbm,
- struct pci_dev *pdev,
- struct device_node *bus_node)
-{
- struct linux_prom_pci_registers *pregs = NULL;
- struct pcidev_cookie *pcp;
- struct device_node *dp;
- struct property *prop;
- int nregs, len, err;
-
- dp = find_device_prom_node(pbm, pdev, bus_node,
- &pregs, &nregs);
- if (!dp) {
- /* If it is not in the OBP device tree then
- * there must be a damn good reason for it.
- *
- * So what we do is delete the device from the
- * PCI device tree completely. This scenario
- * is seen, for example, on CP1500 for the
- * second EBUS/HappyMeal pair if the external
- * connector for it is not present.
- */
- pci_remove_bus_device(pdev);
+ p = kzalloc(sizeof(*p), GFP_KERNEL);
+ if (!p)
return;
- }
-
- pcp = kzalloc(sizeof(*pcp), GFP_ATOMIC);
- if (pcp == NULL) {
- prom_printf("PCI_COOKIE: Fatal malloc error, aborting...\n");
- prom_halt();
- }
- pcp->pbm = pbm;
- pcp->prom_node = dp;
- pcp->op = of_find_device_by_node(dp);
- memcpy(pcp->prom_regs, pregs,
- nregs * sizeof(struct linux_prom_pci_registers));
- pcp->num_prom_regs = nregs;
-
- /* We can't have the pcidev_cookie assignments be just
- * direct pointers into the property value, since they
- * are potentially modified by the probing process.
- */
- prop = of_find_property(dp, "assigned-addresses", &len);
- if (!prop) {
- pcp->num_prom_assignments = 0;
- } else {
- memcpy(pcp->prom_assignments, prop->value, len);
- pcp->num_prom_assignments =
- (len / sizeof(pcp->prom_assignments[0]));
- }
-
- if (strcmp(dp->name, "ebus") == 0) {
- struct linux_prom_ebus_ranges *erng;
- int iter;
-
- /* EBUS is special... */
- prop = of_find_property(dp, "ranges", &len);
- if (!prop) {
- prom_printf("EBUS: Fatal error, no range property\n");
- prom_halt();
- }
- erng = prop->value;
- len = (len / sizeof(erng[0]));
- for (iter = 0; iter < len; iter++) {
- struct linux_prom_ebus_ranges *ep = &erng[iter];
- struct linux_prom_pci_registers *ap;
-
- ap = &pcp->prom_assignments[iter];
-
- ap->phys_hi = ep->parent_phys_hi;
- ap->phys_mid = ep->parent_phys_mid;
- ap->phys_lo = ep->parent_phys_lo;
- ap->size_hi = 0;
- ap->size_lo = ep->size;
- }
- pcp->num_prom_assignments = len;
- }
-
- fixup_obp_assignments(pdev, pcp);
-
- pdev->sysdata = pcp;
-
- /* we don't really care if we can create this file or not,
- * but we need to assign the result of the call or the world will fall
- * under alien invasion and everybody will be frozen on a spaceship
- * ready to be eaten on alpha centauri by some green and jelly humanoid.
- */
- err = sysfs_create_file(&pdev->dev.kobj, &dev_attr_obppath.attr);
-}
-
-void __init pci_fill_in_pbm_cookies(struct pci_bus *pbus,
- struct pci_pbm_info *pbm,
- struct device_node *dp)
-{
- struct pci_dev *pdev, *pdev_next;
- struct pci_bus *this_pbus, *pbus_next;
-
- /* This must be _safe because the cookie fillin
- routine can delete devices from the tree. */
- list_for_each_entry_safe(pdev, pdev_next, &pbus->devices, bus_list)
- pdev_cookie_fillin(pbm, pdev, dp);
-
- list_for_each_entry_safe(this_pbus, pbus_next, &pbus->children, node) {
- struct pcidev_cookie *pcp = this_pbus->self->sysdata;
-
- pci_fill_in_pbm_cookies(this_pbus, pbm, pcp->prom_node);
- }
-}
-static void __init bad_assignment(struct pci_dev *pdev,
- struct linux_prom_pci_registers *ap,
- struct resource *res,
- int do_prom_halt)
-{
- prom_printf("PCI: Bogus PROM assignment. BUS[%02x] DEVFN[%x]\n",
- pdev->bus->number, pdev->devfn);
- if (ap)
- prom_printf("PCI: phys[%08x:%08x:%08x] size[%08x:%08x]\n",
- ap->phys_hi, ap->phys_mid, ap->phys_lo,
- ap->size_hi, ap->size_lo);
- if (res)
- prom_printf("PCI: RES[%016lx-->%016lx:(%lx)]\n",
- res->start, res->end, res->flags);
- if (do_prom_halt)
- prom_halt();
-}
-
-static struct resource *
-__init get_root_resource(struct linux_prom_pci_registers *ap,
- struct pci_pbm_info *pbm)
-{
- int space = (ap->phys_hi >> 24) & 3;
-
- switch (space) {
- case 0:
- /* Configuration space, silently ignore it. */
- return NULL;
-
- case 1:
- /* 16-bit IO space */
- return &pbm->io_space;
-
- case 2:
- /* 32-bit MEM space */
- return &pbm->mem_space;
-
- case 3:
- /* 64-bit MEM space, these are allocated out of
- * the 32-bit mem_space range for the PBM, ie.
- * we just zero out the upper 32-bits.
- */
- return &pbm->mem_space;
-
- default:
- printk("PCI: What is resource space %x?\n", space);
- return NULL;
- };
-}
-
-static struct resource *
-__init get_device_resource(struct linux_prom_pci_registers *ap,
- struct pci_dev *pdev)
-{
- struct resource *res;
- int breg = (ap->phys_hi & 0xff);
-
- switch (breg) {
- case PCI_ROM_ADDRESS:
- /* Unfortunately I have seen several cases where
- * buggy FCODE uses a space value of '1' (I/O space)
- * in the register property for the ROM address
- * so disable this sanity check for now.
- */
-#if 0
- {
- int space = (ap->phys_hi >> 24) & 3;
-
- /* It had better be MEM space. */
- if (space != 2)
- bad_assignment(pdev, ap, NULL, 0);
- }
-#endif
- res = &pdev->resource[PCI_ROM_RESOURCE];
- break;
-
- case PCI_BASE_ADDRESS_0:
- case PCI_BASE_ADDRESS_1:
- case PCI_BASE_ADDRESS_2:
- case PCI_BASE_ADDRESS_3:
- case PCI_BASE_ADDRESS_4:
- case PCI_BASE_ADDRESS_5:
- res = &pdev->resource[(breg - PCI_BASE_ADDRESS_0) / 4];
- break;
-
- default:
- bad_assignment(pdev, ap, NULL, 0);
- res = NULL;
- break;
- };
-
- return res;
-}
-
-static void __init pdev_record_assignments(struct pci_pbm_info *pbm,
- struct pci_dev *pdev)
-{
- struct pcidev_cookie *pcp = pdev->sysdata;
- int i;
-
- for (i = 0; i < pcp->num_prom_assignments; i++) {
- struct linux_prom_pci_registers *ap;
- struct resource *root, *res;
-
- /* The format of this property is specified in
- * the PCI Bus Binding to IEEE1275-1994.
- */
- ap = &pcp->prom_assignments[i];
- root = get_root_resource(ap, pbm);
- res = get_device_resource(ap, pdev);
- if (root == NULL || res == NULL ||
- res->flags == 0)
- continue;
-
- /* Ok we know which resource this PROM assignment is
- * for, sanity check it.
- */
- if ((res->start & 0xffffffffUL) != ap->phys_lo)
- bad_assignment(pdev, ap, res, 1);
-
- /* If it is a 64-bit MEM space assignment, verify that
- * the resource is too and that the upper 32-bits match.
- */
- if (((ap->phys_hi >> 24) & 3) == 3) {
- if (((res->flags & IORESOURCE_MEM) == 0) ||
- ((res->flags & PCI_BASE_ADDRESS_MEM_TYPE_MASK)
- != PCI_BASE_ADDRESS_MEM_TYPE_64))
- bad_assignment(pdev, ap, res, 1);
- if ((res->start >> 32) != ap->phys_mid)
- bad_assignment(pdev, ap, res, 1);
-
- /* PBM cannot generate cpu initiated PIOs
- * to the full 64-bit space. Therefore the
- * upper 32-bits better be zero. If it is
- * not, just skip it and we will assign it
- * properly ourselves.
- */
- if ((res->start >> 32) != 0UL) {
- printk(KERN_ERR "PCI: OBP assigns out of range MEM address "
- "%016lx for region %ld on device %s\n",
- res->start, (res - &pdev->resource[0]), pci_name(pdev));
- continue;
- }
- }
-
- /* Adjust the resource into the physical address space
- * of this PBM.
- */
- pbm->parent->resource_adjust(pdev, res, root);
-
- if (request_resource(root, res) < 0) {
- int rnum;
-
- /* OK, there is some conflict. But this is fine
- * since we'll reassign it in the fixup pass.
- *
- * Do not print the warning for ROM resources
- * as such a conflict is quite common and
- * harmless as the ROM bar is disabled.
- */
- rnum = (res - &pdev->resource[0]);
- if (rnum != PCI_ROM_RESOURCE)
- printk(KERN_ERR "PCI: Resource collision, "
- "region %d "
- "[%016lx:%016lx] of device %s\n",
- rnum,
- res->start, res->end,
- pci_name(pdev));
- }
- }
-}
-
-void __init pci_record_assignments(struct pci_pbm_info *pbm,
- struct pci_bus *pbus)
-{
- struct pci_dev *dev;
- struct pci_bus *bus;
+ p->name = "System ROM";
+ p->start = mem_res->start + 0xf0000UL;
+ p->end = p->start + 0xffffUL;
+ p->flags = IORESOURCE_BUSY;
+ request_resource(mem_res, p);
- list_for_each_entry(dev, &pbus->devices, bus_list)
- pdev_record_assignments(pbm, dev);
+ p = kzalloc(sizeof(*p), GFP_KERNEL);
+ if (!p)
+ return;
- list_for_each_entry(bus, &pbus->children, node)
- pci_record_assignments(pbm, bus);
+ p->name = "Video ROM";
+ p->start = mem_res->start + 0xc0000UL;
+ p->end = p->start + 0x7fffUL;
+ p->flags = IORESOURCE_BUSY;
+ request_resource(mem_res, p);
}
-/* Return non-zero if PDEV has implicit I/O resources even
- * though it may not have an I/O base address register
- * active.
- */
-static int __init has_implicit_io(struct pci_dev *pdev)
+static void pci_register_iommu_region(struct pci_pbm_info *pbm)
{
- int class = pdev->class >> 8;
+ const u32 *vdma = of_get_property(pbm->prom_node, "virtual-dma", NULL);
- if (class == PCI_CLASS_NOT_DEFINED ||
- class == PCI_CLASS_NOT_DEFINED_VGA ||
- class == PCI_CLASS_STORAGE_IDE ||
- (pdev->class >> 16) == PCI_BASE_CLASS_DISPLAY)
- return 1;
+ if (vdma) {
+ struct resource *rp = kmalloc(sizeof(*rp), GFP_KERNEL);
- return 0;
-}
-
-static void __init pdev_assign_unassigned(struct pci_pbm_info *pbm,
- struct pci_dev *pdev)
-{
- u32 reg;
- u16 cmd;
- int i, io_seen, mem_seen;
-
- io_seen = mem_seen = 0;
- for (i = 0; i < PCI_NUM_RESOURCES; i++) {
- struct resource *root, *res;
- unsigned long size, min, max, align;
-
- res = &pdev->resource[i];
-
- if (res->flags & IORESOURCE_IO)
- io_seen++;
- else if (res->flags & IORESOURCE_MEM)
- mem_seen++;
-
- /* If it is already assigned or the resource does
- * not exist, there is nothing to do.
- */
- if (res->parent != NULL || res->flags == 0UL)
- continue;
-
- /* Determine the root we allocate from. */
- if (res->flags & IORESOURCE_IO) {
- root = &pbm->io_space;
- min = root->start + 0x400UL;
- max = root->end;
- } else {
- root = &pbm->mem_space;
- min = root->start;
- max = min + 0x80000000UL;
- }
-
- size = res->end - res->start;
- align = size + 1;
- if (allocate_resource(root, res, size + 1, min, max, align, NULL, NULL) < 0) {
- /* uh oh */
- prom_printf("PCI: Failed to allocate resource %d for %s\n",
- i, pci_name(pdev));
+ if (!rp) {
+ prom_printf("Cannot allocate IOMMU resource.\n");
prom_halt();
}
-
- /* Update PCI config space. */
- pbm->parent->base_address_update(pdev, i);
- }
-
- /* Special case, disable the ROM. Several devices
- * act funny (ie. do not respond to memory space writes)
- * when it is left enabled. A good example are Qlogic,ISP
- * adapters.
- */
- pci_read_config_dword(pdev, PCI_ROM_ADDRESS, &reg);
- reg &= ~PCI_ROM_ADDRESS_ENABLE;
- pci_write_config_dword(pdev, PCI_ROM_ADDRESS, reg);
-
- /* If we saw I/O or MEM resources, enable appropriate
- * bits in PCI command register.
- */
- if (io_seen || mem_seen) {
- pci_read_config_word(pdev, PCI_COMMAND, &cmd);
- if (io_seen || has_implicit_io(pdev))
- cmd |= PCI_COMMAND_IO;
- if (mem_seen)
- cmd |= PCI_COMMAND_MEMORY;
- pci_write_config_word(pdev, PCI_COMMAND, cmd);
- }
-
- /* If this is a PCI bridge or an IDE controller,
- * enable bus mastering. In the former case also
- * set the cache line size correctly.
- */
- if (((pdev->class >> 8) == PCI_CLASS_BRIDGE_PCI) ||
- (((pdev->class >> 8) == PCI_CLASS_STORAGE_IDE) &&
- ((pdev->class & 0x80) != 0))) {
- pci_read_config_word(pdev, PCI_COMMAND, &cmd);
- cmd |= PCI_COMMAND_MASTER;
- pci_write_config_word(pdev, PCI_COMMAND, cmd);
-
- if ((pdev->class >> 8) == PCI_CLASS_BRIDGE_PCI)
- pci_write_config_byte(pdev,
- PCI_CACHE_LINE_SIZE,
- (64 / sizeof(u32)));
+ rp->name = "IOMMU";
+ rp->start = pbm->mem_space.start + (unsigned long) vdma[0];
+ rp->end = rp->start + (unsigned long) vdma[1] - 1UL;
+ rp->flags = IORESOURCE_BUSY;
+ request_resource(&pbm->mem_space, rp);
}
}
-void __init pci_assign_unassigned(struct pci_pbm_info *pbm,
- struct pci_bus *pbus)
+void pci_determine_mem_io_space(struct pci_pbm_info *pbm)
{
- struct pci_dev *dev;
- struct pci_bus *bus;
-
- list_for_each_entry(dev, &pbus->devices, bus_list)
- pdev_assign_unassigned(pbm, dev);
+ const struct linux_prom_pci_ranges *pbm_ranges;
+ int i, saw_mem, saw_io;
+ int num_pbm_ranges;
- list_for_each_entry(bus, &pbus->children, node)
- pci_assign_unassigned(pbm, bus);
-}
+ saw_mem = saw_io = 0;
+ pbm_ranges = of_get_property(pbm->prom_node, "ranges", &i);
+ num_pbm_ranges = i / sizeof(*pbm_ranges);
-static void __init pdev_fixup_irq(struct pci_dev *pdev)
-{
- struct pcidev_cookie *pcp = pdev->sysdata;
- struct of_device *op = pcp->op;
+ for (i = 0; i < num_pbm_ranges; i++) {
+ const struct linux_prom_pci_ranges *pr = &pbm_ranges[i];
+ unsigned long a;
+ u32 parent_phys_hi, parent_phys_lo;
+ int type;
- if (op->irqs[0] == 0xffffffff) {
- pdev->irq = PCI_IRQ_NONE;
- return;
- }
+ parent_phys_hi = pr->parent_phys_hi;
+ parent_phys_lo = pr->parent_phys_lo;
+ if (tlb_type == hypervisor)
+ parent_phys_hi &= 0x0fffffff;
- pdev->irq = op->irqs[0];
+ type = (pr->child_phys_hi >> 24) & 0x3;
+ a = (((unsigned long)parent_phys_hi << 32UL) |
+ ((unsigned long)parent_phys_lo << 0UL));
- pci_write_config_byte(pdev, PCI_INTERRUPT_LINE,
- pdev->irq & PCI_IRQ_INO);
-}
-
-void __init pci_fixup_irq(struct pci_pbm_info *pbm,
- struct pci_bus *pbus)
-{
- struct pci_dev *dev;
- struct pci_bus *bus;
-
- list_for_each_entry(dev, &pbus->devices, bus_list)
- pdev_fixup_irq(dev);
-
- list_for_each_entry(bus, &pbus->children, node)
- pci_fixup_irq(pbm, bus);
-}
-
-static void pdev_setup_busmastering(struct pci_dev *pdev, int is_66mhz)
-{
- u16 cmd;
- u8 hdr_type, min_gnt, ltimer;
-
- pci_read_config_word(pdev, PCI_COMMAND, &cmd);
- cmd |= PCI_COMMAND_MASTER;
- pci_write_config_word(pdev, PCI_COMMAND, cmd);
-
- /* Read it back, if the mastering bit did not
- * get set, the device does not support bus
- * mastering so we have nothing to do here.
- */
- pci_read_config_word(pdev, PCI_COMMAND, &cmd);
- if ((cmd & PCI_COMMAND_MASTER) == 0)
- return;
-
- /* Set correct cache line size, 64-byte on all
- * Sparc64 PCI systems. Note that the value is
- * measured in 32-bit words.
- */
- pci_write_config_byte(pdev, PCI_CACHE_LINE_SIZE,
- 64 / sizeof(u32));
-
- pci_read_config_byte(pdev, PCI_HEADER_TYPE, &hdr_type);
- hdr_type &= ~0x80;
- if (hdr_type != PCI_HEADER_TYPE_NORMAL)
- return;
-
- /* If the latency timer is already programmed with a non-zero
- * value, assume whoever set it (OBP or whoever) knows what
- * they are doing.
- */
- pci_read_config_byte(pdev, PCI_LATENCY_TIMER, &ltimer);
- if (ltimer != 0)
- return;
-
- /* XXX Since I'm tipping off the min grant value to
- * XXX choose a suitable latency timer value, I also
- * XXX considered making use of the max latency value
- * XXX as well. Unfortunately I've seen too many bogusly
- * XXX low settings for it to the point where it lacks
- * XXX any usefulness. In one case, an ethernet card
- * XXX claimed a min grant of 10 and a max latency of 5.
- * XXX Now, if I had two such cards on the same bus I
- * XXX could not set the desired burst period (calculated
- * XXX from min grant) without violating the max latency
- * XXX bound. Duh...
- * XXX
- * XXX I blame dumb PC bios implementors for stuff like
- * XXX this, most of them don't even try to do something
- * XXX sensible with latency timer values and just set some
- * XXX default value (usually 32) into every device.
- */
-
- pci_read_config_byte(pdev, PCI_MIN_GNT, &min_gnt);
-
- if (min_gnt == 0) {
- /* If no min_gnt setting then use a default
- * value.
- */
- if (is_66mhz)
- ltimer = 16;
- else
- ltimer = 32;
- } else {
- int shift_factor;
-
- if (is_66mhz)
- shift_factor = 2;
- else
- shift_factor = 3;
-
- /* Use a default value when the min_gnt value
- * is erroneously high.
- */
- if (((unsigned int) min_gnt << shift_factor) > 512 ||
- ((min_gnt << shift_factor) & 0xff) == 0) {
- ltimer = 8 << shift_factor;
- } else {
- ltimer = min_gnt << shift_factor;
- }
- }
+ switch (type) {
+ case 0:
+ /* PCI config space, 16MB */
+ pbm->config_space = a;
+ break;
- pci_write_config_byte(pdev, PCI_LATENCY_TIMER, ltimer);
-}
+ case 1:
+ /* 16-bit IO space, 16MB */
+ pbm->io_space.start = a;
+ pbm->io_space.end = a + ((16UL*1024UL*1024UL) - 1UL);
+ pbm->io_space.flags = IORESOURCE_IO;
+ saw_io = 1;
+ break;
-void pci_determine_66mhz_disposition(struct pci_pbm_info *pbm,
- struct pci_bus *pbus)
-{
- struct pci_dev *pdev;
- int all_are_66mhz;
- u16 status;
+ case 2:
+ /* 32-bit MEM space, 2GB */
+ pbm->mem_space.start = a;
+ pbm->mem_space.end = a + (0x80000000UL - 1UL);
+ pbm->mem_space.flags = IORESOURCE_MEM;
+ saw_mem = 1;
+ break;
- if (pbm->is_66mhz_capable == 0) {
- all_are_66mhz = 0;
- goto out;
- }
+ case 3:
+ /* XXX 64-bit MEM handling XXX */
- all_are_66mhz = 1;
- list_for_each_entry(pdev, &pbus->devices, bus_list) {
- pci_read_config_word(pdev, PCI_STATUS, &status);
- if (!(status & PCI_STATUS_66MHZ)) {
- all_are_66mhz = 0;
+ default:
break;
- }
+ };
}
-out:
- pbm->all_devs_66mhz = all_are_66mhz;
-
- printk("PCI%d(PBM%c): Bus running at %dMHz\n",
- pbm->parent->index,
- (pbm == &pbm->parent->pbm_A) ? 'A' : 'B',
- (all_are_66mhz ? 66 : 33));
-}
-
-void pci_setup_busmastering(struct pci_pbm_info *pbm,
- struct pci_bus *pbus)
-{
- struct pci_dev *dev;
- struct pci_bus *bus;
- int is_66mhz;
-
- is_66mhz = pbm->is_66mhz_capable && pbm->all_devs_66mhz;
-
- list_for_each_entry(dev, &pbus->devices, bus_list)
- pdev_setup_busmastering(dev, is_66mhz);
-
- list_for_each_entry(bus, &pbus->children, node)
- pci_setup_busmastering(pbm, bus);
-}
-
-void pci_register_legacy_regions(struct resource *io_res,
- struct resource *mem_res)
-{
- struct resource *p;
-
- /* VGA Video RAM. */
- p = kzalloc(sizeof(*p), GFP_KERNEL);
- if (!p)
- return;
- p->name = "Video RAM area";
- p->start = mem_res->start + 0xa0000UL;
- p->end = p->start + 0x1ffffUL;
- p->flags = IORESOURCE_BUSY;
- request_resource(mem_res, p);
+ if (!saw_io || !saw_mem) {
+ prom_printf("%s: Fatal error, missing %s PBM range.\n",
+ pbm->name,
+ (!saw_io ? "IO" : "MEM"));
+ prom_halt();
+ }
- p = kzalloc(sizeof(*p), GFP_KERNEL);
- if (!p)
- return;
+ printk("%s: PCI IO[%lx] MEM[%lx]\n",
+ pbm->name,
+ pbm->io_space.start,
+ pbm->mem_space.start);
- p->name = "System ROM";
- p->start = mem_res->start + 0xf0000UL;
- p->end = p->start + 0xffffUL;
- p->flags = IORESOURCE_BUSY;
- request_resource(mem_res, p);
+ pbm->io_space.name = pbm->mem_space.name = pbm->name;
- p = kzalloc(sizeof(*p), GFP_KERNEL);
- if (!p)
- return;
+ request_resource(&ioport_resource, &pbm->io_space);
+ request_resource(&iomem_resource, &pbm->mem_space);
- p->name = "Video ROM";
- p->start = mem_res->start + 0xc0000UL;
- p->end = p->start + 0x7fffUL;
- p->flags = IORESOURCE_BUSY;
- request_resource(mem_res, p);
+ pci_register_legacy_regions(&pbm->io_space,
+ &pbm->mem_space);
+ pci_register_iommu_region(pbm);
}
/* Generic helper routines for PCI error reporting. */
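pci_determine_mem_io_space() above replaces the old per-driver range parsing: each PBM "ranges" entry carries a space type in the child phys.hi cell and a 64-bit parent (CPU) base split across two cells. A worked example under assumed property values, illustrative only:

/*
 * Assume one "ranges" entry with:
 *   child_phys_hi  = 0x01000000  -> type = (child_phys_hi >> 24) & 0x3 = 1 (I/O)
 *   parent_phys_hi = 0x000007ff
 *   parent_phys_lo = 0xe2000000
 *
 * a = ((u64)parent_phys_hi << 32) | parent_phys_lo = 0x000007ffe2000000
 *
 * giving pbm->io_space = [0x7ffe2000000, 0x7ffe2ffffff], a 16MB window
 * registered against ioport_resource.  A type 2 entry is handled the
 * same way but yields a 2GB mem_space window under iomem_resource.
 */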
diff --git a/arch/sparc64/kernel/pci_impl.h b/arch/sparc64/kernel/pci_impl.h
index 971e2bea30b..1208583fcb8 100644
--- a/arch/sparc64/kernel/pci_impl.h
+++ b/arch/sparc64/kernel/pci_impl.h
@@ -1,7 +1,6 @@
-/* $Id: pci_impl.h,v 1.9 2001/06/13 06:34:30 davem Exp $
- * pci_impl.h: Helper definitions for PCI controller support.
+/* pci_impl.h: Helper definitions for PCI controller support.
*
- * Copyright (C) 1999 David S. Miller (davem@redhat.com)
+ * Copyright (C) 1999, 2007 David S. Miller (davem@davemloft.net)
*/
#ifndef PCI_IMPL_H
@@ -13,26 +12,22 @@
#include <asm/prom.h>
extern struct pci_controller_info *pci_controller_root;
+extern unsigned long pci_memspace_mask;
extern int pci_num_controllers;
/* PCI bus scanning and fixup support. */
-extern void pci_fixup_host_bridge_self(struct pci_bus *pbus);
-extern void pci_fill_in_pbm_cookies(struct pci_bus *pbus,
- struct pci_pbm_info *pbm,
- struct device_node *prom_node);
-extern void pci_record_assignments(struct pci_pbm_info *pbm,
- struct pci_bus *pbus);
-extern void pci_assign_unassigned(struct pci_pbm_info *pbm,
- struct pci_bus *pbus);
-extern void pci_fixup_irq(struct pci_pbm_info *pbm,
- struct pci_bus *pbus);
-extern void pci_determine_66mhz_disposition(struct pci_pbm_info *pbm,
- struct pci_bus *pbus);
-extern void pci_setup_busmastering(struct pci_pbm_info *pbm,
- struct pci_bus *pbus);
-extern void pci_register_legacy_regions(struct resource *io_res,
- struct resource *mem_res);
+extern struct pci_bus *pci_scan_one_pbm(struct pci_pbm_info *pbm);
+extern void pci_determine_mem_io_space(struct pci_pbm_info *pbm);
+
+extern int pci_host_bridge_read_pci_cfg(struct pci_bus *bus_dev,
+ unsigned int devfn,
+ int where, int size,
+ u32 *value);
+extern int pci_host_bridge_write_pci_cfg(struct pci_bus *bus_dev,
+ unsigned int devfn,
+ int where, int size,
+ u32 value);
/* Error reporting support. */
extern void pci_scan_for_target_abort(struct pci_controller_info *, struct pci_pbm_info *, struct pci_bus *);
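
Two of the new entry points above carry most of the consolidation in this patch. A non-buildable fragment of the pattern the chip drivers below (psycho, sabre and schizo) converge on; example_pbm_init() and example_scan_bus() are invented names:

static void example_pbm_init(struct pci_pbm_info *pbm)
{
	/* Probe time: parse the OBP "ranges" property into
	 * pbm->io_space/pbm->mem_space, request the top-level resources
	 * and register the legacy and IOMMU regions (see the pci_common.c
	 * code above).
	 */
	pci_determine_mem_io_space(pbm);
}

static void example_scan_bus(struct pci_controller_info *p)
{
	/* Scan time: one call replaces the old per-driver cookie
	 * allocation, pci_scan_bus(), assignment recording, IRQ fixup
	 * and bus-mastering setup.
	 */
	p->pbm_A.pci_bus = pci_scan_one_pbm(&p->pbm_A);
}
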
diff --git a/arch/sparc64/kernel/pci_iommu.c b/arch/sparc64/kernel/pci_iommu.c
index 2e7f1427088..66712772f49 100644
--- a/arch/sparc64/kernel/pci_iommu.c
+++ b/arch/sparc64/kernel/pci_iommu.c
@@ -1,7 +1,6 @@
-/* $Id: pci_iommu.c,v 1.17 2001/12/17 07:05:09 davem Exp $
- * pci_iommu.c: UltraSparc PCI controller IOM/STC support.
+/* pci_iommu.c: UltraSparc PCI controller IOM/STC support.
*
- * Copyright (C) 1999 David S. Miller (davem@redhat.com)
+ * Copyright (C) 1999, 2007 David S. Miller (davem@davemloft.net)
* Copyright (C) 1999, 2000 Jakub Jelinek (jakub@redhat.com)
*/
@@ -36,7 +35,7 @@
"i" (ASI_PHYS_BYPASS_EC_E))
/* Must be invoked under the IOMMU lock. */
-static void __iommu_flushall(struct pci_iommu *iommu)
+static void __iommu_flushall(struct iommu *iommu)
{
unsigned long tag;
int entry;
@@ -64,7 +63,7 @@ static void __iommu_flushall(struct pci_iommu *iommu)
#define IOPTE_IS_DUMMY(iommu, iopte) \
((iopte_val(*iopte) & IOPTE_PAGE) == (iommu)->dummy_page_pa)
-static void inline iopte_make_dummy(struct pci_iommu *iommu, iopte_t *iopte)
+static inline void iopte_make_dummy(struct iommu *iommu, iopte_t *iopte)
{
unsigned long val = iopte_val(*iopte);
@@ -75,9 +74,9 @@ static void inline iopte_make_dummy(struct pci_iommu *iommu, iopte_t *iopte)
}
/* Based largely upon the ppc64 iommu allocator. */
-static long pci_arena_alloc(struct pci_iommu *iommu, unsigned long npages)
+static long pci_arena_alloc(struct iommu *iommu, unsigned long npages)
{
- struct pci_iommu_arena *arena = &iommu->arena;
+ struct iommu_arena *arena = &iommu->arena;
unsigned long n, i, start, end, limit;
int pass;
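
As the comment above says, the arena is modeled on the ppc64 IOMMU allocator: a bitmap of IO pages is scanned forward from a hint for a contiguous free run, the search wraps to the start once, and failure on the second pass means the arena is full. A self-contained userspace sketch of that policy, with simplified types and no locking; it is not the kernel implementation:

#include <stdio.h>
#include <string.h>

#define ARENA_PAGES 64

struct arena {
	unsigned char map[ARENA_PAGES];	/* 1 = allocated */
	unsigned long hint;		/* where the next search starts */
};

static long arena_alloc(struct arena *a, unsigned long npages)
{
	unsigned long start = a->hint;
	int pass = 0;

	while (1) {
		unsigned long i, run = 0;

		for (i = start; i < ARENA_PAGES; i++) {
			if (a->map[i])
				run = 0;		/* run broken */
			else if (++run == npages) {
				unsigned long first = i - npages + 1;

				memset(&a->map[first], 1, npages);
				a->hint = i + 1;
				return first;
			}
		}
		if (pass++)
			return -1;	/* second pass exhausted: arena full */
		start = 0;		/* wrap to the start once and retry */
	}
}

static void arena_free(struct arena *a, unsigned long base, unsigned long npages)
{
	memset(&a->map[base], 0, npages);
}

int main(void)
{
	struct arena a = { .hint = 0 };
	long n1 = arena_alloc(&a, 8);
	long n2 = arena_alloc(&a, 4);

	arena_free(&a, n1, 8);
	printf("n1=%ld n2=%ld\n", n1, n2);	/* n1=0 n2=8 */
	return 0;
}
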
@@ -116,7 +115,7 @@ again:
return n;
}
-static void pci_arena_free(struct pci_iommu_arena *arena, unsigned long base, unsigned long npages)
+static void pci_arena_free(struct iommu_arena *arena, unsigned long base, unsigned long npages)
{
unsigned long i;
@@ -124,7 +123,7 @@ static void pci_arena_free(struct pci_iommu_arena *arena, unsigned long base, un
__clear_bit(i, arena->map);
}
-void pci_iommu_table_init(struct pci_iommu *iommu, int tsbsize, u32 dma_offset, u32 dma_addr_mask)
+void pci_iommu_table_init(struct iommu *iommu, int tsbsize, u32 dma_offset, u32 dma_addr_mask)
{
unsigned long i, tsbbase, order, sz, num_tsb_entries;
@@ -170,7 +169,7 @@ void pci_iommu_table_init(struct pci_iommu *iommu, int tsbsize, u32 dma_offset,
iopte_make_dummy(iommu, &iommu->page_table[i]);
}
-static inline iopte_t *alloc_npages(struct pci_iommu *iommu, unsigned long npages)
+static inline iopte_t *alloc_npages(struct iommu *iommu, unsigned long npages)
{
long entry;
@@ -181,12 +180,12 @@ static inline iopte_t *alloc_npages(struct pci_iommu *iommu, unsigned long npage
return iommu->page_table + entry;
}
-static inline void free_npages(struct pci_iommu *iommu, dma_addr_t base, unsigned long npages)
+static inline void free_npages(struct iommu *iommu, dma_addr_t base, unsigned long npages)
{
pci_arena_free(&iommu->arena, base >> IO_PAGE_SHIFT, npages);
}
-static int iommu_alloc_ctx(struct pci_iommu *iommu)
+static int iommu_alloc_ctx(struct iommu *iommu)
{
int lowest = iommu->ctx_lowest_free;
int sz = IOMMU_NUM_CTXS - lowest;
@@ -205,7 +204,7 @@ static int iommu_alloc_ctx(struct pci_iommu *iommu)
return n;
}
-static inline void iommu_free_ctx(struct pci_iommu *iommu, int ctx)
+static inline void iommu_free_ctx(struct iommu *iommu, int ctx)
{
if (likely(ctx)) {
__clear_bit(ctx, iommu->ctx_bitmap);
@@ -220,8 +219,7 @@ static inline void iommu_free_ctx(struct pci_iommu *iommu, int ctx)
*/
static void *pci_4u_alloc_consistent(struct pci_dev *pdev, size_t size, dma_addr_t *dma_addrp, gfp_t gfp)
{
- struct pcidev_cookie *pcp;
- struct pci_iommu *iommu;
+ struct iommu *iommu;
iopte_t *iopte;
unsigned long flags, order, first_page;
void *ret;
@@ -237,8 +235,7 @@ static void *pci_4u_alloc_consistent(struct pci_dev *pdev, size_t size, dma_addr
return NULL;
memset((char *)first_page, 0, PAGE_SIZE << order);
- pcp = pdev->sysdata;
- iommu = pcp->pbm->iommu;
+ iommu = pdev->dev.archdata.iommu;
spin_lock_irqsave(&iommu->lock, flags);
iopte = alloc_npages(iommu, size >> IO_PAGE_SHIFT);
@@ -268,14 +265,12 @@ static void *pci_4u_alloc_consistent(struct pci_dev *pdev, size_t size, dma_addr
/* Free and unmap a consistent DMA translation. */
static void pci_4u_free_consistent(struct pci_dev *pdev, size_t size, void *cpu, dma_addr_t dvma)
{
- struct pcidev_cookie *pcp;
- struct pci_iommu *iommu;
+ struct iommu *iommu;
iopte_t *iopte;
unsigned long flags, order, npages;
npages = IO_PAGE_ALIGN(size) >> IO_PAGE_SHIFT;
- pcp = pdev->sysdata;
- iommu = pcp->pbm->iommu;
+ iommu = pdev->dev.archdata.iommu;
iopte = iommu->page_table +
((dvma - iommu->page_table_map_base) >> IO_PAGE_SHIFT);
@@ -295,18 +290,16 @@ static void pci_4u_free_consistent(struct pci_dev *pdev, size_t size, void *cpu,
*/
static dma_addr_t pci_4u_map_single(struct pci_dev *pdev, void *ptr, size_t sz, int direction)
{
- struct pcidev_cookie *pcp;
- struct pci_iommu *iommu;
- struct pci_strbuf *strbuf;
+ struct iommu *iommu;
+ struct strbuf *strbuf;
iopte_t *base;
unsigned long flags, npages, oaddr;
unsigned long i, base_paddr, ctx;
u32 bus_addr, ret;
unsigned long iopte_protection;
- pcp = pdev->sysdata;
- iommu = pcp->pbm->iommu;
- strbuf = &pcp->pbm->stc;
+ iommu = pdev->dev.archdata.iommu;
+ strbuf = pdev->dev.archdata.stc;
if (unlikely(direction == PCI_DMA_NONE))
goto bad_no_ctx;
@@ -349,7 +342,7 @@ bad_no_ctx:
return PCI_DMA_ERROR_CODE;
}
-static void pci_strbuf_flush(struct pci_strbuf *strbuf, struct pci_iommu *iommu, u32 vaddr, unsigned long ctx, unsigned long npages, int direction)
+static void pci_strbuf_flush(struct strbuf *strbuf, struct iommu *iommu, u32 vaddr, unsigned long ctx, unsigned long npages, int direction)
{
int limit;
@@ -416,9 +409,8 @@ do_flush_sync:
/* Unmap a single streaming mode DMA translation. */
static void pci_4u_unmap_single(struct pci_dev *pdev, dma_addr_t bus_addr, size_t sz, int direction)
{
- struct pcidev_cookie *pcp;
- struct pci_iommu *iommu;
- struct pci_strbuf *strbuf;
+ struct iommu *iommu;
+ struct strbuf *strbuf;
iopte_t *base;
unsigned long flags, npages, ctx, i;
@@ -428,9 +420,8 @@ static void pci_4u_unmap_single(struct pci_dev *pdev, dma_addr_t bus_addr, size_
return;
}
- pcp = pdev->sysdata;
- iommu = pcp->pbm->iommu;
- strbuf = &pcp->pbm->stc;
+ iommu = pdev->dev.archdata.iommu;
+ strbuf = pdev->dev.archdata.stc;
npages = IO_PAGE_ALIGN(bus_addr + sz) - (bus_addr & IO_PAGE_MASK);
npages >>= IO_PAGE_SHIFT;
@@ -549,9 +540,8 @@ static inline void fill_sg(iopte_t *iopte, struct scatterlist *sg,
*/
static int pci_4u_map_sg(struct pci_dev *pdev, struct scatterlist *sglist, int nelems, int direction)
{
- struct pcidev_cookie *pcp;
- struct pci_iommu *iommu;
- struct pci_strbuf *strbuf;
+ struct iommu *iommu;
+ struct strbuf *strbuf;
unsigned long flags, ctx, npages, iopte_protection;
iopte_t *base;
u32 dma_base;
@@ -570,9 +560,8 @@ static int pci_4u_map_sg(struct pci_dev *pdev, struct scatterlist *sglist, int n
return 1;
}
- pcp = pdev->sysdata;
- iommu = pcp->pbm->iommu;
- strbuf = &pcp->pbm->stc;
+ iommu = pdev->dev.archdata.iommu;
+ strbuf = pdev->dev.archdata.stc;
if (unlikely(direction == PCI_DMA_NONE))
goto bad_no_ctx;
@@ -636,9 +625,8 @@ bad_no_ctx:
/* Unmap a set of streaming mode DMA translations. */
static void pci_4u_unmap_sg(struct pci_dev *pdev, struct scatterlist *sglist, int nelems, int direction)
{
- struct pcidev_cookie *pcp;
- struct pci_iommu *iommu;
- struct pci_strbuf *strbuf;
+ struct iommu *iommu;
+ struct strbuf *strbuf;
iopte_t *base;
unsigned long flags, ctx, i, npages;
u32 bus_addr;
@@ -648,9 +636,8 @@ static void pci_4u_unmap_sg(struct pci_dev *pdev, struct scatterlist *sglist, in
WARN_ON(1);
}
- pcp = pdev->sysdata;
- iommu = pcp->pbm->iommu;
- strbuf = &pcp->pbm->stc;
+ iommu = pdev->dev.archdata.iommu;
+ strbuf = pdev->dev.archdata.stc;
bus_addr = sglist->dma_address & IO_PAGE_MASK;
@@ -696,14 +683,12 @@ static void pci_4u_unmap_sg(struct pci_dev *pdev, struct scatterlist *sglist, in
*/
static void pci_4u_dma_sync_single_for_cpu(struct pci_dev *pdev, dma_addr_t bus_addr, size_t sz, int direction)
{
- struct pcidev_cookie *pcp;
- struct pci_iommu *iommu;
- struct pci_strbuf *strbuf;
+ struct iommu *iommu;
+ struct strbuf *strbuf;
unsigned long flags, ctx, npages;
- pcp = pdev->sysdata;
- iommu = pcp->pbm->iommu;
- strbuf = &pcp->pbm->stc;
+ iommu = pdev->dev.archdata.iommu;
+ strbuf = pdev->dev.archdata.stc;
if (!strbuf->strbuf_enabled)
return;
@@ -736,15 +721,13 @@ static void pci_4u_dma_sync_single_for_cpu(struct pci_dev *pdev, dma_addr_t bus_
*/
static void pci_4u_dma_sync_sg_for_cpu(struct pci_dev *pdev, struct scatterlist *sglist, int nelems, int direction)
{
- struct pcidev_cookie *pcp;
- struct pci_iommu *iommu;
- struct pci_strbuf *strbuf;
+ struct iommu *iommu;
+ struct strbuf *strbuf;
unsigned long flags, ctx, npages, i;
u32 bus_addr;
- pcp = pdev->sysdata;
- iommu = pcp->pbm->iommu;
- strbuf = &pcp->pbm->stc;
+ iommu = pdev->dev.archdata.iommu;
+ strbuf = pdev->dev.archdata.stc;
if (!strbuf->strbuf_enabled)
return;
@@ -775,7 +758,7 @@ static void pci_4u_dma_sync_sg_for_cpu(struct pci_dev *pdev, struct scatterlist
spin_unlock_irqrestore(&iommu->lock, flags);
}
-struct pci_iommu_ops pci_sun4u_iommu_ops = {
+const struct pci_iommu_ops pci_sun4u_iommu_ops = {
.alloc_consistent = pci_4u_alloc_consistent,
.free_consistent = pci_4u_free_consistent,
.map_single = pci_4u_map_single,
@@ -809,13 +792,12 @@ static void ali_sound_dma_hack(struct pci_dev *pdev, int set_bit)
int pci_dma_supported(struct pci_dev *pdev, u64 device_mask)
{
- struct pcidev_cookie *pcp = pdev->sysdata;
u64 dma_addr_mask;
if (pdev == NULL) {
dma_addr_mask = 0xffffffff;
} else {
- struct pci_iommu *iommu = pcp->pbm->iommu;
+ struct iommu *iommu = pdev->dev.archdata.iommu;
dma_addr_mask = iommu->dma_addr_mask;
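
Every conversion in this file has the same shape: the pcidev_cookie hop through pdev->sysdata disappears and the per-device IOMMU and streaming-cache pointers are loaded straight from dev.archdata, which is set up elsewhere in this patch when the devices are created. A compact, non-buildable fragment of the before and after; dma_op_example() is an invented name:

static void dma_op_example(struct pci_dev *pdev)
{
	/* Old: two dependent loads through the sysdata cookie:
	 *	struct pcidev_cookie *pcp = pdev->sysdata;
	 *	struct pci_iommu *iommu = pcp->pbm->iommu;
	 * New: direct per-device pointers.
	 */
	struct iommu *iommu = pdev->dev.archdata.iommu;
	struct strbuf *strbuf = pdev->dev.archdata.stc;

	(void) iommu;
	(void) strbuf;
}
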
diff --git a/arch/sparc64/kernel/pci_psycho.c b/arch/sparc64/kernel/pci_psycho.c
index fda5db223d9..253d40ec224 100644
--- a/arch/sparc64/kernel/pci_psycho.c
+++ b/arch/sparc64/kernel/pci_psycho.c
@@ -1,7 +1,6 @@
-/* $Id: pci_psycho.c,v 1.33 2002/02/01 00:58:33 davem Exp $
- * pci_psycho.c: PSYCHO/U2P specific PCI controller support.
+/* pci_psycho.c: PSYCHO/U2P specific PCI controller support.
*
- * Copyright (C) 1997, 1998, 1999 David S. Miller (davem@caipfs.rutgers.edu)
+ * Copyright (C) 1997, 1998, 1999, 2007 David S. Miller (davem@davemloft.net)
* Copyright (C) 1998, 1999 Eddie C. Dost (ecd@skynet.be)
* Copyright (C) 1999 Jakub Jelinek (jakub@redhat.com)
*/
@@ -119,6 +118,10 @@ static int psycho_read_pci_cfg(struct pci_bus *bus_dev, unsigned int devfn,
u16 tmp16;
u8 tmp8;
+ if (bus_dev == pbm->pci_bus && devfn == 0x00)
+ return pci_host_bridge_read_pci_cfg(bus_dev, devfn, where,
+ size, value);
+
switch (size) {
case 1:
*value = 0xff;
@@ -172,6 +175,9 @@ static int psycho_write_pci_cfg(struct pci_bus *bus_dev, unsigned int devfn,
unsigned char bus = bus_dev->number;
u32 *addr;
+ if (bus_dev == pbm->pci_bus && devfn == 0x00)
+ return pci_host_bridge_write_pci_cfg(bus_dev, devfn, where,
+ size, value);
addr = psycho_pci_config_mkaddr(pbm, bus, devfn, where);
if (!addr)
return PCIBIOS_SUCCESSFUL;
@@ -263,7 +269,7 @@ static void __psycho_check_one_stc(struct pci_controller_info *p,
struct pci_pbm_info *pbm,
int is_pbm_a)
{
- struct pci_strbuf *strbuf = &pbm->stc;
+ struct strbuf *strbuf = &pbm->stc;
unsigned long regbase = p->pbm_A.controller_regs;
unsigned long err_base, tag_base, line_base;
u64 control;
@@ -412,7 +418,7 @@ static void psycho_check_iommu_error(struct pci_controller_info *p,
unsigned long afar,
enum psycho_error_type type)
{
- struct pci_iommu *iommu = p->pbm_A.iommu;
+ struct iommu *iommu = p->pbm_A.iommu;
unsigned long iommu_tag[16];
unsigned long iommu_data[16];
unsigned long flags;
@@ -895,59 +901,6 @@ static void psycho_register_error_handlers(struct pci_controller_info *p)
}
/* PSYCHO boot time probing and initialization. */
-static void psycho_resource_adjust(struct pci_dev *pdev,
- struct resource *res,
- struct resource *root)
-{
- res->start += root->start;
- res->end += root->start;
-}
-
-static void psycho_base_address_update(struct pci_dev *pdev, int resource)
-{
- struct pcidev_cookie *pcp = pdev->sysdata;
- struct pci_pbm_info *pbm = pcp->pbm;
- struct resource *res, *root;
- u32 reg;
- int where, size, is_64bit;
-
- res = &pdev->resource[resource];
- if (resource < 6) {
- where = PCI_BASE_ADDRESS_0 + (resource * 4);
- } else if (resource == PCI_ROM_RESOURCE) {
- where = pdev->rom_base_reg;
- } else {
- /* Somebody might have asked allocation of a non-standard resource */
- return;
- }
-
- is_64bit = 0;
- if (res->flags & IORESOURCE_IO)
- root = &pbm->io_space;
- else {
- root = &pbm->mem_space;
- if ((res->flags & PCI_BASE_ADDRESS_MEM_TYPE_MASK)
- == PCI_BASE_ADDRESS_MEM_TYPE_64)
- is_64bit = 1;
- }
-
- size = res->end - res->start;
- pci_read_config_dword(pdev, where, &reg);
- reg = ((reg & size) |
- (((u32)(res->start - root->start)) & ~size));
- if (resource == PCI_ROM_RESOURCE) {
- reg |= PCI_ROM_ADDRESS_ENABLE;
- res->flags |= IORESOURCE_ROM_ENABLE;
- }
- pci_write_config_dword(pdev, where, reg);
-
- /* This knows that the upper 32-bits of the address
- * must be zero. Our PCI common layer enforces this.
- */
- if (is_64bit)
- pci_write_config_dword(pdev, where + 4, 0);
-}
-
static void pbm_config_busmastering(struct pci_pbm_info *pbm)
{
u8 *addr;
@@ -968,28 +921,7 @@ static void pbm_config_busmastering(struct pci_pbm_info *pbm)
static void pbm_scan_bus(struct pci_controller_info *p,
struct pci_pbm_info *pbm)
{
- struct pcidev_cookie *cookie = kzalloc(sizeof(*cookie), GFP_KERNEL);
-
- if (!cookie) {
- prom_printf("PSYCHO: Critical allocation failure.\n");
- prom_halt();
- }
-
- /* All we care about is the PBM. */
- cookie->pbm = pbm;
-
- pbm->pci_bus = pci_scan_bus(pbm->pci_first_busno,
- p->pci_ops,
- pbm);
- pci_fixup_host_bridge_self(pbm->pci_bus);
- pbm->pci_bus->self->sysdata = cookie;
-
- pci_fill_in_pbm_cookies(pbm->pci_bus, pbm, pbm->prom_node);
- pci_record_assignments(pbm, pbm->pci_bus);
- pci_assign_unassigned(pbm, pbm->pci_bus);
- pci_fixup_irq(pbm, pbm->pci_bus);
- pci_determine_66mhz_disposition(pbm, pbm->pci_bus);
- pci_setup_busmastering(pbm, pbm->pci_bus);
+ pbm->pci_bus = pci_scan_one_pbm(pbm);
}
static void psycho_scan_bus(struct pci_controller_info *p)
@@ -1009,7 +941,7 @@ static void psycho_scan_bus(struct pci_controller_info *p)
static void psycho_iommu_init(struct pci_controller_info *p)
{
- struct pci_iommu *iommu = p->pbm_A.iommu;
+ struct iommu *iommu = p->pbm_A.iommu;
unsigned long i;
u64 control;
@@ -1094,19 +1026,6 @@ static void psycho_controller_hwinit(struct pci_controller_info *p)
psycho_write(p->pbm_A.controller_regs + PSYCHO_PCIB_DIAG, tmp);
}
-static void pbm_register_toplevel_resources(struct pci_controller_info *p,
- struct pci_pbm_info *pbm)
-{
- char *name = pbm->name;
-
- pbm->io_space.name = pbm->mem_space.name = name;
-
- request_resource(&ioport_resource, &pbm->io_space);
- request_resource(&iomem_resource, &pbm->mem_space);
- pci_register_legacy_regions(&pbm->io_space,
- &pbm->mem_space);
-}
-
static void psycho_pbm_strbuf_init(struct pci_controller_info *p,
struct pci_pbm_info *pbm,
int is_pbm_a)
@@ -1172,19 +1091,11 @@ static void psycho_pbm_init(struct pci_controller_info *p,
unsigned int *busrange;
struct property *prop;
struct pci_pbm_info *pbm;
- int len;
- if (is_pbm_a) {
+ if (is_pbm_a)
pbm = &p->pbm_A;
- pbm->pci_first_slot = 1;
- pbm->io_space.start = pbm->controller_regs + PSYCHO_IOSPACE_A;
- pbm->mem_space.start = pbm->controller_regs + PSYCHO_MEMSPACE_A;
- } else {
+ else
pbm = &p->pbm_B;
- pbm->pci_first_slot = 2;
- pbm->io_space.start = pbm->controller_regs + PSYCHO_IOSPACE_B;
- pbm->mem_space.start = pbm->controller_regs + PSYCHO_MEMSPACE_B;
- }
pbm->chip_type = PBM_CHIP_TYPE_PSYCHO;
pbm->chip_version = 0;
@@ -1196,41 +1107,15 @@ static void psycho_pbm_init(struct pci_controller_info *p,
if (prop)
pbm->chip_revision = *(int *) prop->value;
- pbm->io_space.end = pbm->io_space.start + PSYCHO_IOSPACE_SIZE;
- pbm->io_space.flags = IORESOURCE_IO;
- pbm->mem_space.end = pbm->mem_space.start + PSYCHO_MEMSPACE_SIZE;
- pbm->mem_space.flags = IORESOURCE_MEM;
-
pbm->parent = p;
pbm->prom_node = dp;
pbm->name = dp->full_name;
- pbm_register_toplevel_resources(p, pbm);
-
printk("%s: PSYCHO PCI Bus Module ver[%x:%x]\n",
pbm->name,
pbm->chip_version, pbm->chip_revision);
- prop = of_find_property(dp, "ranges", &len);
- if (prop) {
- pbm->pbm_ranges = prop->value;
- pbm->num_pbm_ranges =
- (len / sizeof(struct linux_prom_pci_ranges));
- } else {
- pbm->num_pbm_ranges = 0;
- }
-
- prop = of_find_property(dp, "interrupt-map", &len);
- if (prop) {
- pbm->pbm_intmap = prop->value;
- pbm->num_pbm_intmap =
- (len / sizeof(struct linux_prom_pci_intmap));
-
- prop = of_find_property(dp, "interrupt-map-mask", NULL);
- pbm->pbm_intmask = prop->value;
- } else {
- pbm->num_pbm_intmap = 0;
- }
+ pci_determine_mem_io_space(pbm);
prop = of_find_property(dp, "bus-range", NULL);
busrange = prop->value;
@@ -1246,7 +1131,7 @@ void psycho_init(struct device_node *dp, char *model_name)
{
struct linux_prom64_registers *pr_regs;
struct pci_controller_info *p;
- struct pci_iommu *iommu;
+ struct iommu *iommu;
struct property *prop;
u32 upa_portid;
int is_pbm_a;
@@ -1269,7 +1154,7 @@ void psycho_init(struct device_node *dp, char *model_name)
prom_printf("PSYCHO: Fatal memory allocation error.\n");
prom_halt();
}
- iommu = kzalloc(sizeof(struct pci_iommu), GFP_ATOMIC);
+ iommu = kzalloc(sizeof(struct iommu), GFP_ATOMIC);
if (!iommu) {
prom_printf("PSYCHO: Fatal memory allocation error.\n");
prom_halt();
@@ -1282,10 +1167,7 @@ void psycho_init(struct device_node *dp, char *model_name)
p->pbm_A.portid = upa_portid;
p->pbm_B.portid = upa_portid;
p->index = pci_num_controllers++;
- p->pbms_same_domain = 0;
p->scan_bus = psycho_scan_bus;
- p->base_address_update = psycho_base_address_update;
- p->resource_adjust = psycho_resource_adjust;
p->pci_ops = &psycho_ops;
prop = of_find_property(dp, "reg", NULL);
diff --git a/arch/sparc64/kernel/pci_sabre.c b/arch/sparc64/kernel/pci_sabre.c
index 94bb681f232..397862fbd9e 100644
--- a/arch/sparc64/kernel/pci_sabre.c
+++ b/arch/sparc64/kernel/pci_sabre.c
@@ -1,7 +1,6 @@
-/* $Id: pci_sabre.c,v 1.42 2002/01/23 11:27:32 davem Exp $
- * pci_sabre.c: Sabre specific PCI controller support.
+/* pci_sabre.c: Sabre specific PCI controller support.
*
- * Copyright (C) 1997, 1998, 1999 David S. Miller (davem@caipfs.rutgers.edu)
+ * Copyright (C) 1997, 1998, 1999, 2007 David S. Miller (davem@davemloft.net)
* Copyright (C) 1998, 1999 Eddie C. Dost (ecd@skynet.be)
* Copyright (C) 1999 Jakub Jelinek (jakub@redhat.com)
*/
@@ -254,9 +253,6 @@ static int __sabre_out_of_range(struct pci_pbm_info *pbm,
return 0;
return ((pbm->parent == 0) ||
- ((pbm == &pbm->parent->pbm_B) &&
- (bus == pbm->pci_first_busno) &&
- PCI_SLOT(devfn) > 8) ||
((pbm == &pbm->parent->pbm_A) &&
(bus == pbm->pci_first_busno) &&
PCI_SLOT(devfn) > 8));
@@ -322,6 +318,12 @@ static int __sabre_read_pci_cfg(struct pci_bus *bus_dev, unsigned int devfn,
static int sabre_read_pci_cfg(struct pci_bus *bus, unsigned int devfn,
int where, int size, u32 *value)
{
+ struct pci_pbm_info *pbm = bus->sysdata;
+
+ if (bus == pbm->pci_bus && devfn == 0x00)
+ return pci_host_bridge_read_pci_cfg(bus, devfn, where,
+ size, value);
+
if (!bus->number && sabre_out_of_range(devfn)) {
switch (size) {
case 1:
@@ -438,6 +440,12 @@ static int __sabre_write_pci_cfg(struct pci_bus *bus_dev, unsigned int devfn,
static int sabre_write_pci_cfg(struct pci_bus *bus, unsigned int devfn,
int where, int size, u32 value)
{
+ struct pci_pbm_info *pbm = bus->sysdata;
+
+ if (bus == pbm->pci_bus && devfn == 0x00)
+ return pci_host_bridge_write_pci_cfg(bus, devfn, where,
+ size, value);
+
if (bus->number)
return __sabre_write_pci_cfg(bus, devfn, where, size, value);
@@ -490,7 +498,7 @@ static void sabre_check_iommu_error(struct pci_controller_info *p,
unsigned long afsr,
unsigned long afar)
{
- struct pci_iommu *iommu = p->pbm_A.iommu;
+ struct iommu *iommu = p->pbm_A.iommu;
unsigned long iommu_tag[16];
unsigned long iommu_data[16];
unsigned long flags;
@@ -710,8 +718,8 @@ static irqreturn_t sabre_pcierr_intr_other(struct pci_controller_info *p)
p->index);
ret = IRQ_HANDLED;
}
- pci_read_config_word(sabre_root_bus->self,
- PCI_STATUS, &stat);
+ pci_bus_read_config_word(sabre_root_bus, 0,
+ PCI_STATUS, &stat);
if (stat & (PCI_STATUS_PARITY |
PCI_STATUS_SIG_TARGET_ABORT |
PCI_STATUS_REC_TARGET_ABORT |
@@ -719,8 +727,8 @@ static irqreturn_t sabre_pcierr_intr_other(struct pci_controller_info *p)
PCI_STATUS_SIG_SYSTEM_ERROR)) {
printk("SABRE%d: PCI bus error, PCI_STATUS[%04x]\n",
p->index, stat);
- pci_write_config_word(sabre_root_bus->self,
- PCI_STATUS, 0xffff);
+ pci_bus_write_config_word(sabre_root_bus, 0,
+ PCI_STATUS, 0xffff);
ret = IRQ_HANDLED;
}
return ret;
@@ -800,12 +808,10 @@ static irqreturn_t sabre_pcierr_intr(int irq, void *dev_id)
if (error_bits & (SABRE_PIOAFSR_PTA | SABRE_PIOAFSR_STA)) {
sabre_check_iommu_error(p, afsr, afar);
pci_scan_for_target_abort(p, &p->pbm_A, p->pbm_A.pci_bus);
- pci_scan_for_target_abort(p, &p->pbm_B, p->pbm_B.pci_bus);
}
- if (error_bits & (SABRE_PIOAFSR_PMA | SABRE_PIOAFSR_SMA)) {
+ if (error_bits & (SABRE_PIOAFSR_PMA | SABRE_PIOAFSR_SMA))
pci_scan_for_master_abort(p, &p->pbm_A, p->pbm_A.pci_bus);
- pci_scan_for_master_abort(p, &p->pbm_B, p->pbm_B.pci_bus);
- }
+
/* For excessive retries, SABRE/PBM will abort the device
* and there is no way to specifically check for excessive
* retries in the config space status registers. So what
@@ -813,10 +819,8 @@ static irqreturn_t sabre_pcierr_intr(int irq, void *dev_id)
* abort events.
*/
- if (error_bits & (SABRE_PIOAFSR_PPERR | SABRE_PIOAFSR_SPERR)) {
+ if (error_bits & (SABRE_PIOAFSR_PPERR | SABRE_PIOAFSR_SPERR))
pci_scan_for_parity_error(p, &p->pbm_A, p->pbm_A.pci_bus);
- pci_scan_for_parity_error(p, &p->pbm_B, p->pbm_B.pci_bus);
- }
return IRQ_HANDLED;
}
@@ -869,144 +873,52 @@ static void sabre_register_error_handlers(struct pci_controller_info *p)
sabre_write(base + SABRE_PCICTRL, tmp);
}
-static void sabre_resource_adjust(struct pci_dev *pdev,
- struct resource *res,
- struct resource *root)
-{
- struct pci_pbm_info *pbm = pdev->bus->sysdata;
- unsigned long base;
-
- if (res->flags & IORESOURCE_IO)
- base = pbm->controller_regs + SABRE_IOSPACE;
- else
- base = pbm->controller_regs + SABRE_MEMSPACE;
-
- res->start += base;
- res->end += base;
-}
-
-static void sabre_base_address_update(struct pci_dev *pdev, int resource)
-{
- struct pcidev_cookie *pcp = pdev->sysdata;
- struct pci_pbm_info *pbm = pcp->pbm;
- struct resource *res;
- unsigned long base;
- u32 reg;
- int where, size, is_64bit;
-
- res = &pdev->resource[resource];
- if (resource < 6) {
- where = PCI_BASE_ADDRESS_0 + (resource * 4);
- } else if (resource == PCI_ROM_RESOURCE) {
- where = pdev->rom_base_reg;
- } else {
- /* Somebody might have asked allocation of a non-standard resource */
- return;
- }
-
- is_64bit = 0;
- if (res->flags & IORESOURCE_IO)
- base = pbm->controller_regs + SABRE_IOSPACE;
- else {
- base = pbm->controller_regs + SABRE_MEMSPACE;
- if ((res->flags & PCI_BASE_ADDRESS_MEM_TYPE_MASK)
- == PCI_BASE_ADDRESS_MEM_TYPE_64)
- is_64bit = 1;
- }
-
- size = res->end - res->start;
- pci_read_config_dword(pdev, where, &reg);
- reg = ((reg & size) |
- (((u32)(res->start - base)) & ~size));
- if (resource == PCI_ROM_RESOURCE) {
- reg |= PCI_ROM_ADDRESS_ENABLE;
- res->flags |= IORESOURCE_ROM_ENABLE;
- }
- pci_write_config_dword(pdev, where, reg);
-
- /* This knows that the upper 32-bits of the address
- * must be zero. Our PCI common layer enforces this.
- */
- if (is_64bit)
- pci_write_config_dword(pdev, where + 4, 0);
-}
-
static void apb_init(struct pci_controller_info *p, struct pci_bus *sabre_bus)
{
struct pci_dev *pdev;
list_for_each_entry(pdev, &sabre_bus->devices, bus_list) {
-
if (pdev->vendor == PCI_VENDOR_ID_SUN &&
pdev->device == PCI_DEVICE_ID_SUN_SIMBA) {
- u32 word32;
u16 word16;
- sabre_read_pci_cfg(pdev->bus, pdev->devfn,
- PCI_COMMAND, 2, &word32);
- word16 = (u16) word32;
+ pci_read_config_word(pdev, PCI_COMMAND, &word16);
word16 |= PCI_COMMAND_SERR | PCI_COMMAND_PARITY |
PCI_COMMAND_MASTER | PCI_COMMAND_MEMORY |
PCI_COMMAND_IO;
- word32 = (u32) word16;
- sabre_write_pci_cfg(pdev->bus, pdev->devfn,
- PCI_COMMAND, 2, word32);
+ pci_write_config_word(pdev, PCI_COMMAND, word16);
/* Status register bits are "write 1 to clear". */
- sabre_write_pci_cfg(pdev->bus, pdev->devfn,
- PCI_STATUS, 2, 0xffff);
- sabre_write_pci_cfg(pdev->bus, pdev->devfn,
- PCI_SEC_STATUS, 2, 0xffff);
+ pci_write_config_word(pdev, PCI_STATUS, 0xffff);
+ pci_write_config_word(pdev, PCI_SEC_STATUS, 0xffff);
/* Use a primary/secondary latency timer value

* of 64.
*/
- sabre_write_pci_cfg(pdev->bus, pdev->devfn,
- PCI_LATENCY_TIMER, 1, 64);
- sabre_write_pci_cfg(pdev->bus, pdev->devfn,
- PCI_SEC_LATENCY_TIMER, 1, 64);
+ pci_write_config_byte(pdev, PCI_LATENCY_TIMER, 64);
+ pci_write_config_byte(pdev, PCI_SEC_LATENCY_TIMER, 64);
/* Enable reporting/forwarding of master aborts,
* parity, and SERR.
*/
- sabre_write_pci_cfg(pdev->bus, pdev->devfn,
- PCI_BRIDGE_CONTROL, 1,
- (PCI_BRIDGE_CTL_PARITY |
- PCI_BRIDGE_CTL_SERR |
- PCI_BRIDGE_CTL_MASTER_ABORT));
+ pci_write_config_byte(pdev, PCI_BRIDGE_CONTROL,
+ (PCI_BRIDGE_CTL_PARITY |
+ PCI_BRIDGE_CTL_SERR |
+ PCI_BRIDGE_CTL_MASTER_ABORT));
}
}
}
-static struct pcidev_cookie *alloc_bridge_cookie(struct pci_pbm_info *pbm)
-{
- struct pcidev_cookie *cookie = kzalloc(sizeof(*cookie), GFP_KERNEL);
-
- if (!cookie) {
- prom_printf("SABRE: Critical allocation failure.\n");
- prom_halt();
- }
-
- /* All we care about is the PBM. */
- cookie->pbm = pbm;
-
- return cookie;
-}
-
static void sabre_scan_bus(struct pci_controller_info *p)
{
static int once;
- struct pci_bus *sabre_bus, *pbus;
- struct pci_pbm_info *pbm;
- struct pcidev_cookie *cookie;
- int sabres_scanned;
+ struct pci_bus *pbus;
/* The APB bridge speaks to the Sabre host PCI bridge
* at 66Mhz, but the front side of APB runs at 33Mhz
* for both segments.
*/
p->pbm_A.is_66mhz_capable = 0;
- p->pbm_B.is_66mhz_capable = 0;
/* This driver has not been verified to handle
* multiple SABREs yet, so trap this.
@@ -1020,56 +932,13 @@ static void sabre_scan_bus(struct pci_controller_info *p)
}
once++;
- cookie = alloc_bridge_cookie(&p->pbm_A);
-
- sabre_bus = pci_scan_bus(p->pci_first_busno,
- p->pci_ops,
- &p->pbm_A);
- pci_fixup_host_bridge_self(sabre_bus);
- sabre_bus->self->sysdata = cookie;
-
- sabre_root_bus = sabre_bus;
-
- apb_init(p, sabre_bus);
-
- sabres_scanned = 0;
-
- list_for_each_entry(pbus, &sabre_bus->children, node) {
-
- if (pbus->number == p->pbm_A.pci_first_busno) {
- pbm = &p->pbm_A;
- } else if (pbus->number == p->pbm_B.pci_first_busno) {
- pbm = &p->pbm_B;
- } else
- continue;
-
- cookie = alloc_bridge_cookie(pbm);
- pbus->self->sysdata = cookie;
-
- sabres_scanned++;
+ pbus = pci_scan_one_pbm(&p->pbm_A);
+ if (!pbus)
+ return;
- pbus->sysdata = pbm;
- pbm->pci_bus = pbus;
- pci_fill_in_pbm_cookies(pbus, pbm, pbm->prom_node);
- pci_record_assignments(pbm, pbus);
- pci_assign_unassigned(pbm, pbus);
- pci_fixup_irq(pbm, pbus);
- pci_determine_66mhz_disposition(pbm, pbus);
- pci_setup_busmastering(pbm, pbus);
- }
+ sabre_root_bus = pbus;
- if (!sabres_scanned) {
- /* Hummingbird, no APBs. */
- pbm = &p->pbm_A;
- sabre_bus->sysdata = pbm;
- pbm->pci_bus = sabre_bus;
- pci_fill_in_pbm_cookies(sabre_bus, pbm, pbm->prom_node);
- pci_record_assignments(pbm, sabre_bus);
- pci_assign_unassigned(pbm, sabre_bus);
- pci_fixup_irq(pbm, sabre_bus);
- pci_determine_66mhz_disposition(pbm, sabre_bus);
- pci_setup_busmastering(pbm, sabre_bus);
- }
+ apb_init(p, pbus);
sabre_register_error_handlers(p);
}
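
The rewritten apb_init() above leans on the PCI convention that status registers are "write 1 to clear": storing 0xffff to PCI_STATUS clears whichever error bits happen to be set and leaves everything else alone. A minimal standalone simulation of that semantic, in userspace C and purely illustrative:

#include <stdio.h>
#include <stdint.h>

static uint16_t w1c_write(uint16_t reg, uint16_t value)
{
	return reg & ~value;	/* bits set in 'value' are cleared */
}

int main(void)
{
	uint16_t status = 0x8100;		/* two error bits latched */

	status = w1c_write(status, 0xffff);	/* clear everything */
	printf("status=%#x\n", status);		/* prints status=0 */
	return 0;
}
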
@@ -1078,7 +947,7 @@ static void sabre_iommu_init(struct pci_controller_info *p,
int tsbsize, unsigned long dvma_offset,
u32 dma_mask)
{
- struct pci_iommu *iommu = p->pbm_A.iommu;
+ struct iommu *iommu = p->pbm_A.iommu;
unsigned long i;
u64 control;
@@ -1126,224 +995,31 @@ static void sabre_iommu_init(struct pci_controller_info *p,
sabre_write(p->pbm_A.controller_regs + SABRE_IOMMU_CONTROL, control);
}
-static void pbm_register_toplevel_resources(struct pci_controller_info *p,
- struct pci_pbm_info *pbm)
-{
- char *name = pbm->name;
- unsigned long ibase = p->pbm_A.controller_regs + SABRE_IOSPACE;
- unsigned long mbase = p->pbm_A.controller_regs + SABRE_MEMSPACE;
- unsigned int devfn;
- unsigned long first, last, i;
- u8 *addr, map;
-
- sprintf(name, "SABRE%d PBM%c",
- p->index,
- (pbm == &p->pbm_A ? 'A' : 'B'));
- pbm->io_space.name = pbm->mem_space.name = name;
-
- devfn = PCI_DEVFN(1, (pbm == &p->pbm_A) ? 0 : 1);
- addr = sabre_pci_config_mkaddr(pbm, 0, devfn, APB_IO_ADDRESS_MAP);
- map = 0;
- pci_config_read8(addr, &map);
-
- first = 8;
- last = 0;
- for (i = 0; i < 8; i++) {
- if ((map & (1 << i)) != 0) {
- if (first > i)
- first = i;
- if (last < i)
- last = i;
- }
- }
- pbm->io_space.start = ibase + (first << 21UL);
- pbm->io_space.end = ibase + (last << 21UL) + ((1 << 21UL) - 1);
- pbm->io_space.flags = IORESOURCE_IO;
-
- addr = sabre_pci_config_mkaddr(pbm, 0, devfn, APB_MEM_ADDRESS_MAP);
- map = 0;
- pci_config_read8(addr, &map);
-
- first = 8;
- last = 0;
- for (i = 0; i < 8; i++) {
- if ((map & (1 << i)) != 0) {
- if (first > i)
- first = i;
- if (last < i)
- last = i;
- }
- }
- pbm->mem_space.start = mbase + (first << 29UL);
- pbm->mem_space.end = mbase + (last << 29UL) + ((1 << 29UL) - 1);
- pbm->mem_space.flags = IORESOURCE_MEM;
-
- if (request_resource(&ioport_resource, &pbm->io_space) < 0) {
- prom_printf("Cannot register PBM-%c's IO space.\n",
- (pbm == &p->pbm_A ? 'A' : 'B'));
- prom_halt();
- }
- if (request_resource(&iomem_resource, &pbm->mem_space) < 0) {
- prom_printf("Cannot register PBM-%c's MEM space.\n",
- (pbm == &p->pbm_A ? 'A' : 'B'));
- prom_halt();
- }
-
- /* Register legacy regions if this PBM covers that area. */
- if (pbm->io_space.start == ibase &&
- pbm->mem_space.start == mbase)
- pci_register_legacy_regions(&pbm->io_space,
- &pbm->mem_space);
-}
-
-static void sabre_pbm_init(struct pci_controller_info *p, struct device_node *dp, u32 dma_start, u32 dma_end)
+static void sabre_pbm_init(struct pci_controller_info *p, struct device_node *dp)
{
struct pci_pbm_info *pbm;
- struct device_node *node;
- struct property *prop;
- u32 *busrange;
- int len, simbas_found;
-
- simbas_found = 0;
- node = dp->child;
- while (node != NULL) {
- if (strcmp(node->name, "pci"))
- goto next_pci;
-
- prop = of_find_property(node, "model", NULL);
- if (!prop || strncmp(prop->value, "SUNW,simba", prop->length))
- goto next_pci;
-
- simbas_found++;
-
- prop = of_find_property(node, "bus-range", NULL);
- busrange = prop->value;
- if (busrange[0] == 1)
- pbm = &p->pbm_B;
- else
- pbm = &p->pbm_A;
-
- pbm->name = node->full_name;
- printk("%s: SABRE PCI Bus Module\n", pbm->name);
-
- pbm->chip_type = PBM_CHIP_TYPE_SABRE;
- pbm->parent = p;
- pbm->prom_node = node;
- pbm->pci_first_slot = 1;
- pbm->pci_first_busno = busrange[0];
- pbm->pci_last_busno = busrange[1];
-
- prop = of_find_property(node, "ranges", &len);
- if (prop) {
- pbm->pbm_ranges = prop->value;
- pbm->num_pbm_ranges =
- (len / sizeof(struct linux_prom_pci_ranges));
- } else {
- pbm->num_pbm_ranges = 0;
- }
- prop = of_find_property(node, "interrupt-map", &len);
- if (prop) {
- pbm->pbm_intmap = prop->value;
- pbm->num_pbm_intmap =
- (len / sizeof(struct linux_prom_pci_intmap));
-
- prop = of_find_property(node, "interrupt-map-mask",
- NULL);
- pbm->pbm_intmask = prop->value;
- } else {
- pbm->num_pbm_intmap = 0;
- }
+ pbm = &p->pbm_A;
+ pbm->name = dp->full_name;
+ printk("%s: SABRE PCI Bus Module\n", pbm->name);
- pbm_register_toplevel_resources(p, pbm);
-
- next_pci:
- node = node->sibling;
- }
- if (simbas_found == 0) {
- struct resource *rp;
+ pbm->chip_type = PBM_CHIP_TYPE_SABRE;
+ pbm->parent = p;
+ pbm->prom_node = dp;
+ pbm->pci_first_busno = p->pci_first_busno;
+ pbm->pci_last_busno = p->pci_last_busno;
- /* No APBs underneath, probably this is a hummingbird
- * system.
- */
- pbm = &p->pbm_A;
- pbm->parent = p;
- pbm->prom_node = dp;
- pbm->pci_first_busno = p->pci_first_busno;
- pbm->pci_last_busno = p->pci_last_busno;
-
- prop = of_find_property(dp, "ranges", &len);
- if (prop) {
- pbm->pbm_ranges = prop->value;
- pbm->num_pbm_ranges =
- (len / sizeof(struct linux_prom_pci_ranges));
- } else {
- pbm->num_pbm_ranges = 0;
- }
-
- prop = of_find_property(dp, "interrupt-map", &len);
- if (prop) {
- pbm->pbm_intmap = prop->value;
- pbm->num_pbm_intmap =
- (len / sizeof(struct linux_prom_pci_intmap));
-
- prop = of_find_property(dp, "interrupt-map-mask",
- NULL);
- pbm->pbm_intmask = prop->value;
- } else {
- pbm->num_pbm_intmap = 0;
- }
-
- pbm->name = dp->full_name;
- printk("%s: SABRE PCI Bus Module\n", pbm->name);
-
- pbm->io_space.name = pbm->mem_space.name = pbm->name;
-
- /* Hack up top-level resources. */
- pbm->io_space.start = p->pbm_A.controller_regs + SABRE_IOSPACE;
- pbm->io_space.end = pbm->io_space.start + (1UL << 24) - 1UL;
- pbm->io_space.flags = IORESOURCE_IO;
-
- pbm->mem_space.start =
- (p->pbm_A.controller_regs + SABRE_MEMSPACE);
- pbm->mem_space.end =
- (pbm->mem_space.start + ((1UL << 32UL) - 1UL));
- pbm->mem_space.flags = IORESOURCE_MEM;
-
- if (request_resource(&ioport_resource, &pbm->io_space) < 0) {
- prom_printf("Cannot register Hummingbird's IO space.\n");
- prom_halt();
- }
- if (request_resource(&iomem_resource, &pbm->mem_space) < 0) {
- prom_printf("Cannot register Hummingbird's MEM space.\n");
- prom_halt();
- }
-
- rp = kmalloc(sizeof(*rp), GFP_KERNEL);
- if (!rp) {
- prom_printf("Cannot allocate IOMMU resource.\n");
- prom_halt();
- }
- rp->name = "IOMMU";
- rp->start = pbm->mem_space.start + (unsigned long) dma_start;
- rp->end = pbm->mem_space.start + (unsigned long) dma_end - 1UL;
- rp->flags = IORESOURCE_BUSY;
- request_resource(&pbm->mem_space, rp);
-
- pci_register_legacy_regions(&pbm->io_space,
- &pbm->mem_space);
- }
+ pci_determine_mem_io_space(pbm);
}
void sabre_init(struct device_node *dp, char *model_name)
{
- struct linux_prom64_registers *pr_regs;
+ const struct linux_prom64_registers *pr_regs;
struct pci_controller_info *p;
- struct pci_iommu *iommu;
- struct property *prop;
+ struct iommu *iommu;
int tsbsize;
- u32 *busrange;
- u32 *vdma;
+ const u32 *busrange;
+ const u32 *vdma;
u32 upa_portid, dma_mask;
u64 clear_irq;
@@ -1351,13 +1027,9 @@ void sabre_init(struct device_node *dp, char *model_name)
if (!strcmp(model_name, "pci108e,a001"))
hummingbird_p = 1;
else if (!strcmp(model_name, "SUNW,sabre")) {
- prop = of_find_property(dp, "compatible", NULL);
- if (prop) {
- const char *compat = prop->value;
-
- if (!strcmp(compat, "pci108e,a001"))
- hummingbird_p = 1;
- }
+ const char *compat = of_get_property(dp, "compatible", NULL);
+ if (compat && !strcmp(compat, "pci108e,a001"))
+ hummingbird_p = 1;
if (!hummingbird_p) {
struct device_node *dp;
@@ -1381,37 +1053,28 @@ void sabre_init(struct device_node *dp, char *model_name)
prom_printf("SABRE: Error, kmalloc(pci_iommu) failed.\n");
prom_halt();
}
- p->pbm_A.iommu = p->pbm_B.iommu = iommu;
+ p->pbm_A.iommu = iommu;
- upa_portid = 0xff;
- prop = of_find_property(dp, "upa-portid", NULL);
- if (prop)
- upa_portid = *(u32 *) prop->value;
+ upa_portid = of_getintprop_default(dp, "upa-portid", 0xff);
p->next = pci_controller_root;
pci_controller_root = p;
p->pbm_A.portid = upa_portid;
- p->pbm_B.portid = upa_portid;
p->index = pci_num_controllers++;
- p->pbms_same_domain = 1;
p->scan_bus = sabre_scan_bus;
- p->base_address_update = sabre_base_address_update;
- p->resource_adjust = sabre_resource_adjust;
p->pci_ops = &sabre_ops;
/*
* Map in SABRE register set and report the presence of this SABRE.
*/
- prop = of_find_property(dp, "reg", NULL);
- pr_regs = prop->value;
+ pr_regs = of_get_property(dp, "reg", NULL);
/*
* First REG in property is base of entire SABRE register space.
*/
p->pbm_A.controller_regs = pr_regs[0].phys_addr;
- p->pbm_B.controller_regs = pr_regs[0].phys_addr;
/* Clear interrupts */
@@ -1429,11 +1092,10 @@ void sabre_init(struct device_node *dp, char *model_name)
SABRE_PCICTRL_ARBPARK | SABRE_PCICTRL_AEN));
/* Now map in PCI config space for entire SABRE. */
- p->pbm_A.config_space = p->pbm_B.config_space =
+ p->pbm_A.config_space =
(p->pbm_A.controller_regs + SABRE_CONFIGSPACE);
- prop = of_find_property(dp, "virtual-dma", NULL);
- vdma = prop->value;
+ vdma = of_get_property(dp, "virtual-dma", NULL);
dma_mask = vdma[0];
switch(vdma[1]) {
@@ -1457,13 +1119,12 @@ void sabre_init(struct device_node *dp, char *model_name)
sabre_iommu_init(p, tsbsize, vdma[0], dma_mask);
- prop = of_find_property(dp, "bus-range", NULL);
- busrange = prop->value;
+ busrange = of_get_property(dp, "bus-range", NULL);
p->pci_first_busno = busrange[0];
p->pci_last_busno = busrange[1];
/*
* Look for APB underneath.
*/
- sabre_pbm_init(p, dp, vdma[0], vdma[0] + vdma[1]);
+ sabre_pbm_init(p, dp);
}
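
sabre_init() shows the property-access cleanup applied throughout this patch: of_find_property() plus manual pointer chasing becomes of_get_property(), and integer properties with a default become of_getintprop_default(). A non-buildable fragment of the idiom; example_parse() is an invented name, and the NULL check on "bus-range" is added only for illustration since the patch itself trusts the firmware here:

static void example_parse(struct device_node *dp, struct pci_pbm_info *pbm)
{
	const u32 *busrange;

	/* Integer property with a fallback value. */
	pbm->portid = of_getintprop_default(dp, "upa-portid", 0xff);

	/* Pointer to the property value, or NULL if it is missing. */
	busrange = of_get_property(dp, "bus-range", NULL);
	if (busrange) {
		pbm->pci_first_busno = busrange[0];
		pbm->pci_last_busno = busrange[1];
	}
}
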
diff --git a/arch/sparc64/kernel/pci_schizo.c b/arch/sparc64/kernel/pci_schizo.c
index 66911b126ae..91a7385e5d3 100644
--- a/arch/sparc64/kernel/pci_schizo.c
+++ b/arch/sparc64/kernel/pci_schizo.c
@@ -1,7 +1,6 @@
-/* $Id: pci_schizo.c,v 1.24 2002/01/23 11:27:32 davem Exp $
- * pci_schizo.c: SCHIZO/TOMATILLO specific PCI controller support.
+/* pci_schizo.c: SCHIZO/TOMATILLO specific PCI controller support.
*
- * Copyright (C) 2001, 2002, 2003 David S. Miller (davem@redhat.com)
+ * Copyright (C) 2001, 2002, 2003, 2007 David S. Miller (davem@davemloft.net)
*/
#include <linux/kernel.h>
@@ -126,6 +125,9 @@ static int schizo_read_pci_cfg(struct pci_bus *bus_dev, unsigned int devfn,
u16 tmp16;
u8 tmp8;
+ if (bus_dev == pbm->pci_bus && devfn == 0x00)
+ return pci_host_bridge_read_pci_cfg(bus_dev, devfn, where,
+ size, value);
switch (size) {
case 1:
*value = 0xff;
@@ -179,6 +181,9 @@ static int schizo_write_pci_cfg(struct pci_bus *bus_dev, unsigned int devfn,
unsigned char bus = bus_dev->number;
u32 *addr;
+ if (bus_dev == pbm->pci_bus && devfn == 0x00)
+ return pci_host_bridge_write_pci_cfg(bus_dev, devfn, where,
+ size, value);
addr = schizo_pci_config_mkaddr(pbm, bus, devfn, where);
if (!addr)
return PCIBIOS_SUCCESSFUL;
@@ -274,7 +279,7 @@ struct pci_pbm_info *pbm_for_ino(struct pci_controller_info *p, u32 ino)
static void __schizo_check_stc_error_pbm(struct pci_pbm_info *pbm,
enum schizo_error_type type)
{
- struct pci_strbuf *strbuf = &pbm->stc;
+ struct strbuf *strbuf = &pbm->stc;
unsigned long regbase = pbm->pbm_regs;
unsigned long err_base, tag_base, line_base;
u64 control;
@@ -382,7 +387,7 @@ static void __schizo_check_stc_error_pbm(struct pci_pbm_info *pbm,
static void schizo_check_iommu_error_pbm(struct pci_pbm_info *pbm,
enum schizo_error_type type)
{
- struct pci_iommu *iommu = pbm->iommu;
+ struct iommu *iommu = pbm->iommu;
unsigned long iommu_tag[16];
unsigned long iommu_data[16];
unsigned long flags;
@@ -1229,42 +1234,8 @@ static void pbm_config_busmastering(struct pci_pbm_info *pbm)
pci_config_write8(addr, 64);
}
-static void pbm_scan_bus(struct pci_controller_info *p,
- struct pci_pbm_info *pbm)
-{
- struct pcidev_cookie *cookie = kzalloc(sizeof(*cookie), GFP_KERNEL);
-
- if (!cookie) {
- prom_printf("%s: Critical allocation failure.\n", pbm->name);
- prom_halt();
- }
-
- /* All we care about is the PBM. */
- cookie->pbm = pbm;
-
- pbm->pci_bus = pci_scan_bus(pbm->pci_first_busno,
- p->pci_ops,
- pbm);
- pci_fixup_host_bridge_self(pbm->pci_bus);
- pbm->pci_bus->self->sysdata = cookie;
-
- pci_fill_in_pbm_cookies(pbm->pci_bus, pbm, pbm->prom_node);
- pci_record_assignments(pbm, pbm->pci_bus);
- pci_assign_unassigned(pbm, pbm->pci_bus);
- pci_fixup_irq(pbm, pbm->pci_bus);
- pci_determine_66mhz_disposition(pbm, pbm->pci_bus);
- pci_setup_busmastering(pbm, pbm->pci_bus);
-}
-
-static void __schizo_scan_bus(struct pci_controller_info *p,
- int chip_type)
+static void schizo_scan_bus(struct pci_controller_info *p)
{
- if (!p->pbm_B.prom_node || !p->pbm_A.prom_node) {
- printk("PCI: Only one PCI bus module of controller found.\n");
- printk("PCI: Ignoring entire controller.\n");
- return;
- }
-
pbm_config_busmastering(&p->pbm_B);
p->pbm_B.is_66mhz_capable =
(of_find_property(p->pbm_B.prom_node, "66mhz-capable", NULL)
@@ -1273,154 +1244,19 @@ static void __schizo_scan_bus(struct pci_controller_info *p,
p->pbm_A.is_66mhz_capable =
(of_find_property(p->pbm_A.prom_node, "66mhz-capable", NULL)
!= NULL);
- pbm_scan_bus(p, &p->pbm_B);
- pbm_scan_bus(p, &p->pbm_A);
+
+ p->pbm_B.pci_bus = pci_scan_one_pbm(&p->pbm_B);
+ p->pbm_A.pci_bus = pci_scan_one_pbm(&p->pbm_A);
/* After the PCI bus scan is complete, we can register
* the error interrupt handlers.
*/
- if (chip_type == PBM_CHIP_TYPE_TOMATILLO)
+ if (p->pbm_B.chip_type == PBM_CHIP_TYPE_TOMATILLO)
tomatillo_register_error_handlers(p);
else
schizo_register_error_handlers(p);
}
-static void schizo_scan_bus(struct pci_controller_info *p)
-{
- __schizo_scan_bus(p, PBM_CHIP_TYPE_SCHIZO);
-}
-
-static void tomatillo_scan_bus(struct pci_controller_info *p)
-{
- __schizo_scan_bus(p, PBM_CHIP_TYPE_TOMATILLO);
-}
-
-static void schizo_base_address_update(struct pci_dev *pdev, int resource)
-{
- struct pcidev_cookie *pcp = pdev->sysdata;
- struct pci_pbm_info *pbm = pcp->pbm;
- struct resource *res, *root;
- u32 reg;
- int where, size, is_64bit;
-
- res = &pdev->resource[resource];
- if (resource < 6) {
- where = PCI_BASE_ADDRESS_0 + (resource * 4);
- } else if (resource == PCI_ROM_RESOURCE) {
- where = pdev->rom_base_reg;
- } else {
- /* Somebody might have asked allocation of a non-standard resource */
- return;
- }
-
- is_64bit = 0;
- if (res->flags & IORESOURCE_IO)
- root = &pbm->io_space;
- else {
- root = &pbm->mem_space;
- if ((res->flags & PCI_BASE_ADDRESS_MEM_TYPE_MASK)
- == PCI_BASE_ADDRESS_MEM_TYPE_64)
- is_64bit = 1;
- }
-
- size = res->end - res->start;
- pci_read_config_dword(pdev, where, &reg);
- reg = ((reg & size) |
- (((u32)(res->start - root->start)) & ~size));
- if (resource == PCI_ROM_RESOURCE) {
- reg |= PCI_ROM_ADDRESS_ENABLE;
- res->flags |= IORESOURCE_ROM_ENABLE;
- }
- pci_write_config_dword(pdev, where, reg);
-
- /* This knows that the upper 32-bits of the address
- * must be zero. Our PCI common layer enforces this.
- */
- if (is_64bit)
- pci_write_config_dword(pdev, where + 4, 0);
-}
-
-static void schizo_resource_adjust(struct pci_dev *pdev,
- struct resource *res,
- struct resource *root)
-{
- res->start += root->start;
- res->end += root->start;
-}
-
-/* Use ranges property to determine where PCI MEM, I/O, and Config
- * space are for this PCI bus module.
- */
-static void schizo_determine_mem_io_space(struct pci_pbm_info *pbm)
-{
- int i, saw_cfg, saw_mem, saw_io;
-
- saw_cfg = saw_mem = saw_io = 0;
- for (i = 0; i < pbm->num_pbm_ranges; i++) {
- struct linux_prom_pci_ranges *pr = &pbm->pbm_ranges[i];
- unsigned long a;
- int type;
-
- type = (pr->child_phys_hi >> 24) & 0x3;
- a = (((unsigned long)pr->parent_phys_hi << 32UL) |
- ((unsigned long)pr->parent_phys_lo << 0UL));
-
- switch (type) {
- case 0:
- /* PCI config space, 16MB */
- pbm->config_space = a;
- saw_cfg = 1;
- break;
-
- case 1:
- /* 16-bit IO space, 16MB */
- pbm->io_space.start = a;
- pbm->io_space.end = a + ((16UL*1024UL*1024UL) - 1UL);
- pbm->io_space.flags = IORESOURCE_IO;
- saw_io = 1;
- break;
-
- case 2:
- /* 32-bit MEM space, 2GB */
- pbm->mem_space.start = a;
- pbm->mem_space.end = a + (0x80000000UL - 1UL);
- pbm->mem_space.flags = IORESOURCE_MEM;
- saw_mem = 1;
- break;
-
- default:
- break;
- };
- }
-
- if (!saw_cfg || !saw_io || !saw_mem) {
- prom_printf("%s: Fatal error, missing %s PBM range.\n",
- pbm->name,
- ((!saw_cfg ?
- "CFG" :
- (!saw_io ?
- "IO" : "MEM"))));
- prom_halt();
- }
-
- printk("%s: PCI CFG[%lx] IO[%lx] MEM[%lx]\n",
- pbm->name,
- pbm->config_space,
- pbm->io_space.start,
- pbm->mem_space.start);
-}
-
-static void pbm_register_toplevel_resources(struct pci_controller_info *p,
- struct pci_pbm_info *pbm)
-{
- pbm->io_space.name = pbm->mem_space.name = pbm->name;
-
- request_resource(&ioport_resource, &pbm->io_space);
- request_resource(&iomem_resource, &pbm->mem_space);
- pci_register_legacy_regions(&pbm->io_space,
- &pbm->mem_space);
-}
-
#define SCHIZO_STRBUF_CONTROL (0x02800UL)
#define SCHIZO_STRBUF_FLUSH (0x02808UL)
#define SCHIZO_STRBUF_FSYNC (0x02810UL)
@@ -1472,7 +1308,7 @@ static void schizo_pbm_strbuf_init(struct pci_pbm_info *pbm)
static void schizo_pbm_iommu_init(struct pci_pbm_info *pbm)
{
- struct pci_iommu *iommu = pbm->iommu;
+ struct iommu *iommu = pbm->iommu;
unsigned long i, tagbase, database;
struct property *prop;
u32 vdma[2], dma_mask;
@@ -1654,14 +1490,12 @@ static void schizo_pbm_init(struct pci_controller_info *p,
struct device_node *dp, u32 portid,
int chip_type)
{
- struct linux_prom64_registers *regs;
- struct property *prop;
- unsigned int *busrange;
+ const struct linux_prom64_registers *regs;
+ const unsigned int *busrange;
struct pci_pbm_info *pbm;
const char *chipset_name;
- u32 *ino_bitmap;
+ const u32 *ino_bitmap;
int is_pbm_a;
- int len;
switch (chip_type) {
case PBM_CHIP_TYPE_TOMATILLO:
@@ -1689,11 +1523,9 @@ static void schizo_pbm_init(struct pci_controller_info *p,
* 3) PBM PCI config space
* 4) Ichip regs
*/
- prop = of_find_property(dp, "reg", NULL);
- regs = prop->value;
+ regs = of_get_property(dp, "reg", NULL);
is_pbm_a = ((regs[0].phys_addr & 0x00700000) == 0x00600000);
-
if (is_pbm_a)
pbm = &p->pbm_A;
else
@@ -1702,17 +1534,10 @@ static void schizo_pbm_init(struct pci_controller_info *p,
pbm->portid = portid;
pbm->parent = p;
pbm->prom_node = dp;
- pbm->pci_first_slot = 1;
pbm->chip_type = chip_type;
- pbm->chip_version = 0;
- prop = of_find_property(dp, "version#", NULL);
- if (prop)
- pbm->chip_version = *(int *) prop->value;
- pbm->chip_revision = 0;
- prop = of_find_property(dp, "module-revision#", NULL);
- if (prop)
- pbm->chip_revision = *(int *) prop->value;
+ pbm->chip_version = of_getintprop_default(dp, "version#", 0);
+ pbm->chip_revision = of_getintprop_default(dp, "module-version#", 0);
pbm->pbm_regs = regs[0].phys_addr;
pbm->controller_regs = regs[1].phys_addr - 0x10000UL;
@@ -1723,40 +1548,18 @@ static void schizo_pbm_init(struct pci_controller_info *p,
pbm->name = dp->full_name;
printk("%s: %s PCI Bus Module ver[%x:%x]\n",
- pbm->name,
- (chip_type == PBM_CHIP_TYPE_TOMATILLO ?
- "TOMATILLO" : "SCHIZO"),
+ pbm->name, chipset_name,
pbm->chip_version, pbm->chip_revision);
schizo_pbm_hw_init(pbm);
- prop = of_find_property(dp, "ranges", &len);
- pbm->pbm_ranges = prop->value;
- pbm->num_pbm_ranges =
- (len / sizeof(struct linux_prom_pci_ranges));
+ pci_determine_mem_io_space(pbm);
- schizo_determine_mem_io_space(pbm);
- pbm_register_toplevel_resources(p, pbm);
-
- prop = of_find_property(dp, "interrupt-map", &len);
- if (prop) {
- pbm->pbm_intmap = prop->value;
- pbm->num_pbm_intmap =
- (len / sizeof(struct linux_prom_pci_intmap));
-
- prop = of_find_property(dp, "interrupt-map-mask", NULL);
- pbm->pbm_intmask = prop->value;
- } else {
- pbm->num_pbm_intmap = 0;
- }
-
- prop = of_find_property(dp, "ino-bitmap", NULL);
- ino_bitmap = prop->value;
+ ino_bitmap = of_get_property(dp, "ino-bitmap", NULL);
pbm->ino_bitmap = (((u64)ino_bitmap[1] << 32UL) |
((u64)ino_bitmap[0] << 0UL));
- prop = of_find_property(dp, "bus-range", NULL);
- busrange = prop->value;
+ busrange = of_get_property(dp, "bus-range", NULL);
pbm->pci_first_busno = busrange[0];
pbm->pci_last_busno = busrange[1];
@@ -1777,15 +1580,10 @@ static inline int portid_compare(u32 x, u32 y, int chip_type)
static void __schizo_init(struct device_node *dp, char *model_name, int chip_type)
{
struct pci_controller_info *p;
- struct pci_iommu *iommu;
- struct property *prop;
- int is_pbm_a;
+ struct iommu *iommu;
u32 portid;
- portid = 0xff;
- prop = of_find_property(dp, "portid", NULL);
- if (prop)
- portid = *(u32 *) prop->value;
+ portid = of_getintprop_default(dp, "portid", 0xff);
for (p = pci_controller_root; p; p = p->next) {
struct pci_pbm_info *pbm;
@@ -1798,48 +1596,43 @@ static void __schizo_init(struct device_node *dp, char *model_name, int chip_typ
&p->pbm_B);
if (portid_compare(pbm->portid, portid, chip_type)) {
- is_pbm_a = (p->pbm_A.prom_node == NULL);
schizo_pbm_init(p, dp, portid, chip_type);
return;
}
}
p = kzalloc(sizeof(struct pci_controller_info), GFP_ATOMIC);
- if (!p) {
- prom_printf("SCHIZO: Fatal memory allocation error.\n");
- prom_halt();
- }
+ if (!p)
+ goto memfail;
+
+ iommu = kzalloc(sizeof(struct iommu), GFP_ATOMIC);
+ if (!iommu)
+ goto memfail;
- iommu = kzalloc(sizeof(struct pci_iommu), GFP_ATOMIC);
- if (!iommu) {
- prom_printf("SCHIZO: Fatal memory allocation error.\n");
- prom_halt();
- }
p->pbm_A.iommu = iommu;
- iommu = kzalloc(sizeof(struct pci_iommu), GFP_ATOMIC);
- if (!iommu) {
- prom_printf("SCHIZO: Fatal memory allocation error.\n");
- prom_halt();
- }
+ iommu = kzalloc(sizeof(struct iommu), GFP_ATOMIC);
+ if (!iommu)
+ goto memfail;
+
p->pbm_B.iommu = iommu;
p->next = pci_controller_root;
pci_controller_root = p;
p->index = pci_num_controllers++;
- p->pbms_same_domain = 0;
- p->scan_bus = (chip_type == PBM_CHIP_TYPE_TOMATILLO ?
- tomatillo_scan_bus :
- schizo_scan_bus);
- p->base_address_update = schizo_base_address_update;
- p->resource_adjust = schizo_resource_adjust;
+ p->scan_bus = schizo_scan_bus;
p->pci_ops = &schizo_ops;
/* Like PSYCHO we have a 2GB aligned area for memory space. */
pci_memspace_mask = 0x7fffffffUL;
schizo_pbm_init(p, dp, portid, chip_type);
+ return;
+
+memfail:
+ prom_printf("SCHIZO: Fatal memory allocation error.\n");
+ prom_halt();
}
void schizo_init(struct device_node *dp, char *model_name)
diff --git a/arch/sparc64/kernel/pci_sun4v.c b/arch/sparc64/kernel/pci_sun4v.c
index ec22cd61ec8..94295c21932 100644
--- a/arch/sparc64/kernel/pci_sun4v.c
+++ b/arch/sparc64/kernel/pci_sun4v.c
@@ -1,6 +1,6 @@
/* pci_sun4v.c: SUN4V specific PCI controller support.
*
- * Copyright (C) 2006 David S. Miller (davem@davemloft.net)
+ * Copyright (C) 2006, 2007 David S. Miller (davem@davemloft.net)
*/
#include <linux/kernel.h>
@@ -29,7 +29,7 @@
#define PGLIST_NENTS (PAGE_SIZE / sizeof(u64))
-struct pci_iommu_batch {
+struct iommu_batch {
struct pci_dev *pdev; /* Device mapping is for. */
unsigned long prot; /* IOMMU page protections */
unsigned long entry; /* Index into IOTSB. */
@@ -37,12 +37,12 @@ struct pci_iommu_batch {
unsigned long npages; /* Number of pages in list. */
};
-static DEFINE_PER_CPU(struct pci_iommu_batch, pci_iommu_batch);
+static DEFINE_PER_CPU(struct iommu_batch, pci_iommu_batch);
/* Interrupts must be disabled. */
static inline void pci_iommu_batch_start(struct pci_dev *pdev, unsigned long prot, unsigned long entry)
{
- struct pci_iommu_batch *p = &__get_cpu_var(pci_iommu_batch);
+ struct iommu_batch *p = &__get_cpu_var(pci_iommu_batch);
p->pdev = pdev;
p->prot = prot;
@@ -51,10 +51,10 @@ static inline void pci_iommu_batch_start(struct pci_dev *pdev, unsigned long pro
}
/* Interrupts must be disabled. */
-static long pci_iommu_batch_flush(struct pci_iommu_batch *p)
+static long pci_iommu_batch_flush(struct iommu_batch *p)
{
- struct pcidev_cookie *pcp = p->pdev->sysdata;
- unsigned long devhandle = pcp->pbm->devhandle;
+ struct pci_pbm_info *pbm = p->pdev->dev.archdata.host_controller;
+ unsigned long devhandle = pbm->devhandle;
unsigned long prot = p->prot;
unsigned long entry = p->entry;
u64 *pglist = p->pglist;
@@ -89,7 +89,7 @@ static long pci_iommu_batch_flush(struct pci_iommu_batch *p)
/* Interrupts must be disabled. */
static inline long pci_iommu_batch_add(u64 phys_page)
{
- struct pci_iommu_batch *p = &__get_cpu_var(pci_iommu_batch);
+ struct iommu_batch *p = &__get_cpu_var(pci_iommu_batch);
BUG_ON(p->npages >= PGLIST_NENTS);
@@ -103,14 +103,14 @@ static inline long pci_iommu_batch_add(u64 phys_page)
/* Interrupts must be disabled. */
static inline long pci_iommu_batch_end(void)
{
- struct pci_iommu_batch *p = &__get_cpu_var(pci_iommu_batch);
+ struct iommu_batch *p = &__get_cpu_var(pci_iommu_batch);
BUG_ON(p->npages >= PGLIST_NENTS);
return pci_iommu_batch_flush(p);
}
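
The renamed iommu_batch is a per-CPU staging buffer for sun4v IOMMU mappings: physical page addresses accumulate in pglist and are handed to the devhandle-based hypervisor map call in one batch rather than one page at a time. A standalone userspace sketch of that batching idea; fake_hv_map() stands in for the hypervisor call, and error handling plus the details of the real flush path are simplified:

#include <stdio.h>
#include <stdint.h>

#define BATCH_NENTS 8

struct batch {
	uint64_t pglist[BATCH_NENTS];	/* staged physical page addresses */
	unsigned long entry;		/* first IOTSB index covered by pglist */
	unsigned long npages;		/* pages currently staged */
};

static void fake_hv_map(unsigned long entry, const uint64_t *pglist,
			unsigned long npages)
{
	printf("map %lu pages at entry %lu (first pa %#llx)\n",
	       npages, entry, (unsigned long long)pglist[0]);
}

static void batch_flush(struct batch *b)
{
	if (b->npages) {
		fake_hv_map(b->entry, b->pglist, b->npages);
		b->entry += b->npages;	/* next flush continues here */
		b->npages = 0;
	}
}

static void batch_add(struct batch *b, uint64_t phys_page)
{
	b->pglist[b->npages++] = phys_page;
	if (b->npages == BATCH_NENTS)
		batch_flush(b);		/* list full: push it out */
}

int main(void)
{
	struct batch b = { .entry = 100, .npages = 0 };
	int i;

	for (i = 0; i < 10; i++)
		batch_add(&b, 0x40000000ULL + i * 8192);
	batch_flush(&b);		/* flush the partial tail */
	return 0;
}
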
-static long pci_arena_alloc(struct pci_iommu_arena *arena, unsigned long npages)
+static long pci_arena_alloc(struct iommu_arena *arena, unsigned long npages)
{
unsigned long n, i, start, end, limit;
int pass;
@@ -149,7 +149,7 @@ again:
return n;
}
-static void pci_arena_free(struct pci_iommu_arena *arena, unsigned long base, unsigned long npages)
+static void pci_arena_free(struct iommu_arena *arena, unsigned long base, unsigned long npages)
{
unsigned long i;
@@ -159,8 +159,7 @@ static void pci_arena_free(struct pci_iommu_arena *arena, unsigned long base, un
static void *pci_4v_alloc_consistent(struct pci_dev *pdev, size_t size, dma_addr_t *dma_addrp, gfp_t gfp)
{
- struct pcidev_cookie *pcp;
- struct pci_iommu *iommu;
+ struct iommu *iommu;
unsigned long flags, order, first_page, npages, n;
void *ret;
long entry;
@@ -178,8 +177,7 @@ static void *pci_4v_alloc_consistent(struct pci_dev *pdev, size_t size, dma_addr
memset((char *)first_page, 0, PAGE_SIZE << order);
- pcp = pdev->sysdata;
- iommu = pcp->pbm->iommu;
+ iommu = pdev->dev.archdata.iommu;
spin_lock_irqsave(&iommu->lock, flags);
entry = pci_arena_alloc(&iommu->arena, npages);
@@ -226,15 +224,15 @@ arena_alloc_fail:
static void pci_4v_free_consistent(struct pci_dev *pdev, size_t size, void *cpu, dma_addr_t dvma)
{
- struct pcidev_cookie *pcp;
- struct pci_iommu *iommu;
+ struct pci_pbm_info *pbm;
+ struct iommu *iommu;
unsigned long flags, order, npages, entry;
u32 devhandle;
npages = IO_PAGE_ALIGN(size) >> IO_PAGE_SHIFT;
- pcp = pdev->sysdata;
- iommu = pcp->pbm->iommu;
- devhandle = pcp->pbm->devhandle;
+ iommu = pdev->dev.archdata.iommu;
+ pbm = pdev->dev.archdata.host_controller;
+ devhandle = pbm->devhandle;
entry = ((dvma - iommu->page_table_map_base) >> IO_PAGE_SHIFT);
spin_lock_irqsave(&iommu->lock, flags);
@@ -259,16 +257,14 @@ static void pci_4v_free_consistent(struct pci_dev *pdev, size_t size, void *cpu,
static dma_addr_t pci_4v_map_single(struct pci_dev *pdev, void *ptr, size_t sz, int direction)
{
- struct pcidev_cookie *pcp;
- struct pci_iommu *iommu;
+ struct iommu *iommu;
unsigned long flags, npages, oaddr;
unsigned long i, base_paddr;
u32 bus_addr, ret;
unsigned long prot;
long entry;
- pcp = pdev->sysdata;
- iommu = pcp->pbm->iommu;
+ iommu = pdev->dev.archdata.iommu;
if (unlikely(direction == PCI_DMA_NONE))
goto bad;
@@ -324,8 +320,8 @@ iommu_map_fail:
static void pci_4v_unmap_single(struct pci_dev *pdev, dma_addr_t bus_addr, size_t sz, int direction)
{
- struct pcidev_cookie *pcp;
- struct pci_iommu *iommu;
+ struct pci_pbm_info *pbm;
+ struct iommu *iommu;
unsigned long flags, npages;
long entry;
u32 devhandle;
@@ -336,9 +332,9 @@ static void pci_4v_unmap_single(struct pci_dev *pdev, dma_addr_t bus_addr, size_
return;
}
- pcp = pdev->sysdata;
- iommu = pcp->pbm->iommu;
- devhandle = pcp->pbm->devhandle;
+ iommu = pdev->dev.archdata.iommu;
+ pbm = pdev->dev.archdata.host_controller;
+ devhandle = pbm->devhandle;
npages = IO_PAGE_ALIGN(bus_addr + sz) - (bus_addr & IO_PAGE_MASK);
npages >>= IO_PAGE_SHIFT;
@@ -460,8 +456,7 @@ iommu_map_failed:
static int pci_4v_map_sg(struct pci_dev *pdev, struct scatterlist *sglist, int nelems, int direction)
{
- struct pcidev_cookie *pcp;
- struct pci_iommu *iommu;
+ struct iommu *iommu;
unsigned long flags, npages, prot;
u32 dma_base;
struct scatterlist *sgtmp;
@@ -480,8 +475,7 @@ static int pci_4v_map_sg(struct pci_dev *pdev, struct scatterlist *sglist, int n
return 1;
}
- pcp = pdev->sysdata;
- iommu = pcp->pbm->iommu;
+ iommu = pdev->dev.archdata.iommu;
if (unlikely(direction == PCI_DMA_NONE))
goto bad;
@@ -537,8 +531,8 @@ iommu_map_failed:
static void pci_4v_unmap_sg(struct pci_dev *pdev, struct scatterlist *sglist, int nelems, int direction)
{
- struct pcidev_cookie *pcp;
- struct pci_iommu *iommu;
+ struct pci_pbm_info *pbm;
+ struct iommu *iommu;
unsigned long flags, i, npages;
long entry;
u32 devhandle, bus_addr;
@@ -548,9 +542,9 @@ static void pci_4v_unmap_sg(struct pci_dev *pdev, struct scatterlist *sglist, in
WARN_ON(1);
}
- pcp = pdev->sysdata;
- iommu = pcp->pbm->iommu;
- devhandle = pcp->pbm->devhandle;
+ iommu = pdev->dev.archdata.iommu;
+ pbm = pdev->dev.archdata.host_controller;
+ devhandle = pbm->devhandle;
bus_addr = sglist->dma_address & IO_PAGE_MASK;
@@ -589,7 +583,7 @@ static void pci_4v_dma_sync_sg_for_cpu(struct pci_dev *pdev, struct scatterlist
/* Nothing to do... */
}
-struct pci_iommu_ops pci_sun4v_iommu_ops = {
+const struct pci_iommu_ops pci_sun4v_iommu_ops = {
.alloc_consistent = pci_4v_alloc_consistent,
.free_consistent = pci_4v_free_consistent,
.map_single = pci_4v_map_single,
@@ -600,132 +594,12 @@ struct pci_iommu_ops pci_sun4v_iommu_ops = {
.dma_sync_sg_for_cpu = pci_4v_dma_sync_sg_for_cpu,
};
-/* SUN4V PCI configuration space accessors. */
-
-struct pdev_entry {
- struct pdev_entry *next;
- u32 devhandle;
- unsigned int bus;
- unsigned int device;
- unsigned int func;
-};
-
-#define PDEV_HTAB_SIZE 16
-#define PDEV_HTAB_MASK (PDEV_HTAB_SIZE - 1)
-static struct pdev_entry *pdev_htab[PDEV_HTAB_SIZE];
-
-static inline unsigned int pdev_hashfn(u32 devhandle, unsigned int bus, unsigned int device, unsigned int func)
-{
- unsigned int val;
-
- val = (devhandle ^ (devhandle >> 4));
- val ^= bus;
- val ^= device;
- val ^= func;
-
- return val & PDEV_HTAB_MASK;
-}
-
-static int pdev_htab_add(u32 devhandle, unsigned int bus, unsigned int device, unsigned int func)
-{
- struct pdev_entry *p = kmalloc(sizeof(*p), GFP_KERNEL);
- struct pdev_entry **slot;
-
- if (!p)
- return -ENOMEM;
-
- slot = &pdev_htab[pdev_hashfn(devhandle, bus, device, func)];
- p->next = *slot;
- *slot = p;
-
- p->devhandle = devhandle;
- p->bus = bus;
- p->device = device;
- p->func = func;
-
- return 0;
-}
-
-/* Recursively descend into the OBP device tree, rooted at toplevel_node,
- * looking for a PCI device matching bus and devfn.
- */
-static int obp_find(struct device_node *toplevel_node, unsigned int bus, unsigned int devfn)
-{
- toplevel_node = toplevel_node->child;
-
- while (toplevel_node != NULL) {
- struct linux_prom_pci_registers *regs;
- struct property *prop;
- int ret;
-
- ret = obp_find(toplevel_node, bus, devfn);
- if (ret != 0)
- return ret;
-
- prop = of_find_property(toplevel_node, "reg", NULL);
- if (!prop)
- goto next_sibling;
-
- regs = prop->value;
- if (((regs->phys_hi >> 16) & 0xff) == bus &&
- ((regs->phys_hi >> 8) & 0xff) == devfn)
- break;
-
- next_sibling:
- toplevel_node = toplevel_node->sibling;
- }
-
- return toplevel_node != NULL;
-}
-
-static int pdev_htab_populate(struct pci_pbm_info *pbm)
-{
- u32 devhandle = pbm->devhandle;
- unsigned int bus;
-
- for (bus = pbm->pci_first_busno; bus <= pbm->pci_last_busno; bus++) {
- unsigned int devfn;
-
- for (devfn = 0; devfn < 256; devfn++) {
- unsigned int device = PCI_SLOT(devfn);
- unsigned int func = PCI_FUNC(devfn);
-
- if (obp_find(pbm->prom_node, bus, devfn)) {
- int err = pdev_htab_add(devhandle, bus,
- device, func);
- if (err)
- return err;
- }
- }
- }
-
- return 0;
-}
-
-static struct pdev_entry *pdev_find(u32 devhandle, unsigned int bus, unsigned int device, unsigned int func)
-{
- struct pdev_entry *p;
-
- p = pdev_htab[pdev_hashfn(devhandle, bus, device, func)];
- while (p) {
- if (p->devhandle == devhandle &&
- p->bus == bus &&
- p->device == device &&
- p->func == func)
- break;
-
- p = p->next;
- }
-
- return p;
-}
-
static inline int pci_sun4v_out_of_range(struct pci_pbm_info *pbm, unsigned int bus, unsigned int device, unsigned int func)
{
if (bus < pbm->pci_first_busno ||
bus > pbm->pci_last_busno)
return 1;
- return pdev_find(pbm->devhandle, bus, device, func) == NULL;
+ return 0;
}
static int pci_sun4v_read_pci_cfg(struct pci_bus *bus_dev, unsigned int devfn,
@@ -738,6 +612,9 @@ static int pci_sun4v_read_pci_cfg(struct pci_bus *bus_dev, unsigned int devfn,
unsigned int func = PCI_FUNC(devfn);
unsigned long ret;
+ if (bus_dev == pbm->pci_bus && devfn == 0x00)
+ return pci_host_bridge_read_pci_cfg(bus_dev, devfn, where,
+ size, value);
if (pci_sun4v_out_of_range(pbm, bus, device, func)) {
ret = ~0UL;
} else {
@@ -776,6 +653,9 @@ static int pci_sun4v_write_pci_cfg(struct pci_bus *bus_dev, unsigned int devfn,
unsigned int func = PCI_FUNC(devfn);
unsigned long ret;
+ if (bus_dev == pbm->pci_bus && devfn == 0x00)
+ return pci_host_bridge_write_pci_cfg(bus_dev, devfn, where,
+ size, value);
if (pci_sun4v_out_of_range(pbm, bus, device, func)) {
/* Do nothing. */
} else {
@@ -800,27 +680,7 @@ static struct pci_ops pci_sun4v_ops = {
static void pbm_scan_bus(struct pci_controller_info *p,
struct pci_pbm_info *pbm)
{
- struct pcidev_cookie *cookie = kzalloc(sizeof(*cookie), GFP_KERNEL);
-
- if (!cookie) {
- prom_printf("%s: Critical allocation failure.\n", pbm->name);
- prom_halt();
- }
-
- /* All we care about is the PBM. */
- cookie->pbm = pbm;
-
- pbm->pci_bus = pci_scan_bus(pbm->pci_first_busno, p->pci_ops, pbm);
-#if 0
- pci_fixup_host_bridge_self(pbm->pci_bus);
- pbm->pci_bus->self->sysdata = cookie;
-#endif
- pci_fill_in_pbm_cookies(pbm->pci_bus, pbm, pbm->prom_node);
- pci_record_assignments(pbm, pbm->pci_bus);
- pci_assign_unassigned(pbm, pbm->pci_bus);
- pci_fixup_irq(pbm, pbm->pci_bus);
- pci_determine_66mhz_disposition(pbm, pbm->pci_bus);
- pci_setup_busmastering(pbm, pbm->pci_bus);
+ pbm->pci_bus = pci_scan_one_pbm(pbm);
}
static void pci_sun4v_scan_bus(struct pci_controller_info *p)
@@ -844,130 +704,10 @@ static void pci_sun4v_scan_bus(struct pci_controller_info *p)
/* XXX register error interrupt handlers XXX */
}
-static void pci_sun4v_base_address_update(struct pci_dev *pdev, int resource)
-{
- struct pcidev_cookie *pcp = pdev->sysdata;
- struct pci_pbm_info *pbm = pcp->pbm;
- struct resource *res, *root;
- u32 reg;
- int where, size, is_64bit;
-
- res = &pdev->resource[resource];
- if (resource < 6) {
- where = PCI_BASE_ADDRESS_0 + (resource * 4);
- } else if (resource == PCI_ROM_RESOURCE) {
- where = pdev->rom_base_reg;
- } else {
- /* Somebody might have asked allocation of a non-standard resource */
- return;
- }
-
- /* XXX 64-bit MEM handling is not %100 correct... XXX */
- is_64bit = 0;
- if (res->flags & IORESOURCE_IO)
- root = &pbm->io_space;
- else {
- root = &pbm->mem_space;
- if ((res->flags & PCI_BASE_ADDRESS_MEM_TYPE_MASK)
- == PCI_BASE_ADDRESS_MEM_TYPE_64)
- is_64bit = 1;
- }
-
- size = res->end - res->start;
- pci_read_config_dword(pdev, where, &reg);
- reg = ((reg & size) |
- (((u32)(res->start - root->start)) & ~size));
- if (resource == PCI_ROM_RESOURCE) {
- reg |= PCI_ROM_ADDRESS_ENABLE;
- res->flags |= IORESOURCE_ROM_ENABLE;
- }
- pci_write_config_dword(pdev, where, reg);
-
- /* This knows that the upper 32-bits of the address
- * must be zero. Our PCI common layer enforces this.
- */
- if (is_64bit)
- pci_write_config_dword(pdev, where + 4, 0);
-}
-
-static void pci_sun4v_resource_adjust(struct pci_dev *pdev,
- struct resource *res,
- struct resource *root)
-{
- res->start += root->start;
- res->end += root->start;
-}
-
-/* Use ranges property to determine where PCI MEM, I/O, and Config
- * space are for this PCI bus module.
- */
-static void pci_sun4v_determine_mem_io_space(struct pci_pbm_info *pbm)
-{
- int i, saw_mem, saw_io;
-
- saw_mem = saw_io = 0;
- for (i = 0; i < pbm->num_pbm_ranges; i++) {
- struct linux_prom_pci_ranges *pr = &pbm->pbm_ranges[i];
- unsigned long a;
- int type;
-
- type = (pr->child_phys_hi >> 24) & 0x3;
- a = (((unsigned long)pr->parent_phys_hi << 32UL) |
- ((unsigned long)pr->parent_phys_lo << 0UL));
-
- switch (type) {
- case 1:
- /* 16-bit IO space, 16MB */
- pbm->io_space.start = a;
- pbm->io_space.end = a + ((16UL*1024UL*1024UL) - 1UL);
- pbm->io_space.flags = IORESOURCE_IO;
- saw_io = 1;
- break;
-
- case 2:
- /* 32-bit MEM space, 2GB */
- pbm->mem_space.start = a;
- pbm->mem_space.end = a + (0x80000000UL - 1UL);
- pbm->mem_space.flags = IORESOURCE_MEM;
- saw_mem = 1;
- break;
-
- case 3:
- /* XXX 64-bit MEM handling XXX */
-
- default:
- break;
- };
- }
-
- if (!saw_io || !saw_mem) {
- prom_printf("%s: Fatal error, missing %s PBM range.\n",
- pbm->name,
- (!saw_io ? "IO" : "MEM"));
- prom_halt();
- }
-
- printk("%s: PCI IO[%lx] MEM[%lx]\n",
- pbm->name,
- pbm->io_space.start,
- pbm->mem_space.start);
-}
-
-static void pbm_register_toplevel_resources(struct pci_controller_info *p,
- struct pci_pbm_info *pbm)
-{
- pbm->io_space.name = pbm->mem_space.name = pbm->name;
-
- request_resource(&ioport_resource, &pbm->io_space);
- request_resource(&iomem_resource, &pbm->mem_space);
- pci_register_legacy_regions(&pbm->io_space,
- &pbm->mem_space);
-}
-
static unsigned long probe_existing_entries(struct pci_pbm_info *pbm,
- struct pci_iommu *iommu)
+ struct iommu *iommu)
{
- struct pci_iommu_arena *arena = &iommu->arena;
+ struct iommu_arena *arena = &iommu->arena;
unsigned long i, cnt = 0;
u32 devhandle;
@@ -994,7 +734,7 @@ static unsigned long probe_existing_entries(struct pci_pbm_info *pbm,
static void pci_sun4v_iommu_init(struct pci_pbm_info *pbm)
{
- struct pci_iommu *iommu = pbm->iommu;
+ struct iommu *iommu = pbm->iommu;
struct property *prop;
unsigned long num_tsb_entries, sz;
u32 vdma[2], dma_mask, dma_offset;
@@ -1281,7 +1021,7 @@ h_error:
static void pci_sun4v_msi_init(struct pci_pbm_info *pbm)
{
- u32 *val;
+ const u32 *val;
int len;
val = of_get_property(pbm->prom_node, "#msi-eqs", &len);
@@ -1289,16 +1029,16 @@ static void pci_sun4v_msi_init(struct pci_pbm_info *pbm)
goto no_msi;
pbm->msiq_num = *val;
if (pbm->msiq_num) {
- struct msiq_prop {
+ const struct msiq_prop {
u32 first_msiq;
u32 num_msiq;
u32 first_devino;
} *mqp;
- struct msi_range_prop {
+ const struct msi_range_prop {
u32 first_msi;
u32 num_msi;
} *mrng;
- struct addr_range_prop {
+ const struct addr_range_prop {
u32 msi32_high;
u32 msi32_low;
u32 msi32_len;
@@ -1410,8 +1150,7 @@ static int pci_sun4v_setup_msi_irq(unsigned int *virt_irq_p,
struct pci_dev *pdev,
struct msi_desc *entry)
{
- struct pcidev_cookie *pcp = pdev->sysdata;
- struct pci_pbm_info *pbm = pcp->pbm;
+ struct pci_pbm_info *pbm = pdev->dev.archdata.host_controller;
unsigned long devino, msiqid;
struct msi_msg msg;
int msi_num, err;
@@ -1455,7 +1194,7 @@ static int pci_sun4v_setup_msi_irq(unsigned int *virt_irq_p,
if (pci_sun4v_msi_setvalid(pbm->devhandle, msi_num, HV_MSIVALID_VALID))
goto out_err;
- pcp->msi_num = msi_num;
+ pdev->dev.archdata.msi_num = msi_num;
if (entry->msi_attrib.is_64) {
msg.address_hi = pbm->msi64_start >> 32;
@@ -1484,12 +1223,11 @@ out_err:
static void pci_sun4v_teardown_msi_irq(unsigned int virt_irq,
struct pci_dev *pdev)
{
- struct pcidev_cookie *pcp = pdev->sysdata;
- struct pci_pbm_info *pbm = pcp->pbm;
+ struct pci_pbm_info *pbm = pdev->dev.archdata.host_controller;
unsigned long msiqid, err;
unsigned int msi_num;
- msi_num = pcp->msi_num;
+ msi_num = pdev->dev.archdata.msi_num;
err = pci_sun4v_msi_getmsiq(pbm->devhandle, msi_num, &msiqid);
if (err) {
printk(KERN_ERR "%s: getmsiq gives error %lu\n",
@@ -1516,8 +1254,6 @@ static void pci_sun4v_msi_init(struct pci_pbm_info *pbm)
static void pci_sun4v_pbm_init(struct pci_controller_info *p, struct device_node *dp, u32 devhandle)
{
struct pci_pbm_info *pbm;
- struct property *prop;
- int len, i;
if (devhandle & 0x40)
pbm = &p->pbm_B;
@@ -1526,7 +1262,6 @@ static void pci_sun4v_pbm_init(struct pci_controller_info *p, struct device_node
pbm->parent = p;
pbm->prom_node = dp;
- pbm->pci_first_slot = 1;
pbm->devhandle = devhandle;
@@ -1534,39 +1269,17 @@ static void pci_sun4v_pbm_init(struct pci_controller_info *p, struct device_node
printk("%s: SUN4V PCI Bus Module\n", pbm->name);
- prop = of_find_property(dp, "ranges", &len);
- pbm->pbm_ranges = prop->value;
- pbm->num_pbm_ranges =
- (len / sizeof(struct linux_prom_pci_ranges));
-
- /* Mask out the top 8 bits of the ranges, leaving the real
- * physical address.
- */
- for (i = 0; i < pbm->num_pbm_ranges; i++)
- pbm->pbm_ranges[i].parent_phys_hi &= 0x0fffffff;
-
- pci_sun4v_determine_mem_io_space(pbm);
- pbm_register_toplevel_resources(p, pbm);
-
- prop = of_find_property(dp, "interrupt-map", &len);
- pbm->pbm_intmap = prop->value;
- pbm->num_pbm_intmap =
- (len / sizeof(struct linux_prom_pci_intmap));
-
- prop = of_find_property(dp, "interrupt-map-mask", NULL);
- pbm->pbm_intmask = prop->value;
+ pci_determine_mem_io_space(pbm);
pci_sun4v_get_bus_range(pbm);
pci_sun4v_iommu_init(pbm);
pci_sun4v_msi_init(pbm);
-
- pdev_htab_populate(pbm);
}
void sun4v_pci_init(struct device_node *dp, char *model_name)
{
struct pci_controller_info *p;
- struct pci_iommu *iommu;
+ struct iommu *iommu;
struct property *prop;
struct linux_prom64_registers *regs;
u32 devhandle;
@@ -1606,13 +1319,13 @@ void sun4v_pci_init(struct device_node *dp, char *model_name)
if (!p)
goto fatal_memory_error;
- iommu = kzalloc(sizeof(struct pci_iommu), GFP_ATOMIC);
+ iommu = kzalloc(sizeof(struct iommu), GFP_ATOMIC);
if (!iommu)
goto fatal_memory_error;
p->pbm_A.iommu = iommu;
- iommu = kzalloc(sizeof(struct pci_iommu), GFP_ATOMIC);
+ iommu = kzalloc(sizeof(struct iommu), GFP_ATOMIC);
if (!iommu)
goto fatal_memory_error;
@@ -1622,11 +1335,8 @@ void sun4v_pci_init(struct device_node *dp, char *model_name)
pci_controller_root = p;
p->index = pci_num_controllers++;
- p->pbms_same_domain = 0;
p->scan_bus = pci_sun4v_scan_bus;
- p->base_address_update = pci_sun4v_base_address_update;
- p->resource_adjust = pci_sun4v_resource_adjust;
#ifdef CONFIG_PCI_MSI
p->setup_msi_irq = pci_sun4v_setup_msi_irq;
p->teardown_msi_irq = pci_sun4v_teardown_msi_irq;
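The pci_sun4v.c changes above retire the pcidev_cookie indirection and the private OBP hash table: the DMA and MSI paths now reach their per-device state through dev.archdata, config-space validation shrinks to a bus-range check plus a host-bridge fast path, and bus scanning, resource setup and ranges parsing move into the shared pci_scan_one_pbm()/pci_determine_mem_io_space() helpers. A minimal sketch of the new lookup pattern used by the DMA entry points (the archdata field names are the ones this patch introduces; the surrounding function is illustrative only, not part of the patch):

	static void example_sun4v_dma_prologue(struct pci_dev *pdev)
	{
		/* Per-device IOMMU and host controller, straight from archdata. */
		struct iommu *iommu = pdev->dev.archdata.iommu;
		struct pci_pbm_info *pbm = pdev->dev.archdata.host_controller;
		u32 devhandle = pbm->devhandle;

		/* devhandle feeds the sun4v hypervisor IOMMU calls;
		 * iommu->lock and iommu->arena cover the software TSB bookkeeping. */
	}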
diff --git a/arch/sparc64/kernel/process.c b/arch/sparc64/kernel/process.c
index b291060c25a..a114151f9fb 100644
--- a/arch/sparc64/kernel/process.c
+++ b/arch/sparc64/kernel/process.c
@@ -28,6 +28,7 @@
#include <linux/reboot.h>
#include <linux/delay.h>
#include <linux/compat.h>
+#include <linux/tick.h>
#include <linux/init.h>
#include <asm/oplib.h>
@@ -88,12 +89,14 @@ void cpu_idle(void)
set_thread_flag(TIF_POLLING_NRFLAG);
while(1) {
- if (need_resched()) {
- preempt_enable_no_resched();
- schedule();
- preempt_disable();
- }
- sparc64_yield();
+ tick_nohz_stop_sched_tick();
+ while (!need_resched())
+ sparc64_yield();
+ tick_nohz_restart_sched_tick();
+
+ preempt_enable_no_resched();
+ schedule();
+ preempt_disable();
}
}
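The reworked cpu_idle() above is the standard dynticks idle pattern: stop the per-cpu tick before parking in the low-power yield loop, and restart it before handing control back to the scheduler, so an idle CPU takes no timer interrupts. Annotated form of the same loop, with the ordering spelled out in comments (nothing here beyond what the hunk already adds):

	while (1) {
		/* No periodic tick while this CPU sits idle... */
		tick_nohz_stop_sched_tick();
		while (!need_resched())
			sparc64_yield();
		/* ...but bring it back before schedule() so tick-based
		 * accounting resumes before other tasks run. */
		tick_nohz_restart_sched_tick();

		preempt_enable_no_resched();
		schedule();
		preempt_disable();
	}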
diff --git a/arch/sparc64/kernel/prom.c b/arch/sparc64/kernel/prom.c
index 0917c24c4f0..5e1fcd05160 100644
--- a/arch/sparc64/kernel/prom.c
+++ b/arch/sparc64/kernel/prom.c
@@ -36,12 +36,13 @@ static struct device_node *allnodes;
*/
static DEFINE_RWLOCK(devtree_lock);
-int of_device_is_compatible(struct device_node *device, const char *compat)
+int of_device_is_compatible(const struct device_node *device,
+ const char *compat)
{
const char* cp;
int cplen, l;
- cp = (char *) of_get_property(device, "compatible", &cplen);
+ cp = of_get_property(device, "compatible", &cplen);
if (cp == NULL)
return 0;
while (cplen > 0) {
@@ -154,13 +155,14 @@ struct device_node *of_find_compatible_node(struct device_node *from,
}
EXPORT_SYMBOL(of_find_compatible_node);
-struct property *of_find_property(struct device_node *np, const char *name,
+struct property *of_find_property(const struct device_node *np,
+ const char *name,
int *lenp)
{
struct property *pp;
for (pp = np->properties; pp != 0; pp = pp->next) {
- if (strcmp(pp->name, name) == 0) {
+ if (strcasecmp(pp->name, name) == 0) {
if (lenp != 0)
*lenp = pp->length;
break;
@@ -174,7 +176,8 @@ EXPORT_SYMBOL(of_find_property);
* Find a property with a given name for a given node
* and return the value.
*/
-void *of_get_property(struct device_node *np, const char *name, int *lenp)
+const void *of_get_property(const struct device_node *np, const char *name,
+ int *lenp)
{
struct property *pp = of_find_property(np,name,lenp);
return pp ? pp->value : NULL;
@@ -196,7 +199,7 @@ EXPORT_SYMBOL(of_getintprop_default);
int of_n_addr_cells(struct device_node *np)
{
- int* ip;
+ const int* ip;
do {
if (np->parent)
np = np->parent;
@@ -211,7 +214,7 @@ EXPORT_SYMBOL(of_n_addr_cells);
int of_n_size_cells(struct device_node *np)
{
- int* ip;
+ const int* ip;
do {
if (np->parent)
np = np->parent;
@@ -243,7 +246,7 @@ int of_set_property(struct device_node *dp, const char *name, void *val, int len
while (*prevp) {
struct property *prop = *prevp;
- if (!strcmp(prop->name, name)) {
+ if (!strcasecmp(prop->name, name)) {
void *old_val = prop->value;
int ret;
@@ -397,7 +400,7 @@ static unsigned int psycho_irq_build(struct device_node *dp,
static void psycho_irq_trans_init(struct device_node *dp)
{
- struct linux_prom64_registers *regs;
+ const struct linux_prom64_registers *regs;
dp->irq_trans = prom_early_alloc(sizeof(struct of_irq_controller));
dp->irq_trans->irq_build = psycho_irq_build;
@@ -547,7 +550,7 @@ static unsigned long __sabre_onboard_imap_off[] = {
static int sabre_device_needs_wsync(struct device_node *dp)
{
struct device_node *parent = dp->parent;
- char *parent_model, *parent_compat;
+ const char *parent_model, *parent_compat;
/* This traversal up towards the root is meant to
* handle two cases:
@@ -589,7 +592,7 @@ static unsigned int sabre_irq_build(struct device_node *dp,
{
struct sabre_irq_data *irq_data = _data;
unsigned long controller_regs = irq_data->controller_regs;
- struct linux_prom_pci_registers *regs;
+ const struct linux_prom_pci_registers *regs;
unsigned long imap, iclr;
unsigned long imap_off, iclr_off;
int inofixup = 0;
@@ -639,9 +642,9 @@ static unsigned int sabre_irq_build(struct device_node *dp,
static void sabre_irq_trans_init(struct device_node *dp)
{
- struct linux_prom64_registers *regs;
+ const struct linux_prom64_registers *regs;
struct sabre_irq_data *irq_data;
- u32 *busrange;
+ const u32 *busrange;
dp->irq_trans = prom_early_alloc(sizeof(struct of_irq_controller));
dp->irq_trans->irq_build = sabre_irq_build;
@@ -795,7 +798,7 @@ static unsigned int schizo_irq_build(struct device_node *dp,
static void __schizo_irq_trans_init(struct device_node *dp, int is_tomatillo)
{
- struct linux_prom64_registers *regs;
+ const struct linux_prom64_registers *regs;
struct schizo_irq_data *irq_data;
dp->irq_trans = prom_early_alloc(sizeof(struct of_irq_controller));
@@ -836,7 +839,7 @@ static unsigned int pci_sun4v_irq_build(struct device_node *dp,
static void pci_sun4v_irq_trans_init(struct device_node *dp)
{
- struct linux_prom64_registers *regs;
+ const struct linux_prom64_registers *regs;
dp->irq_trans = prom_early_alloc(sizeof(struct of_irq_controller));
dp->irq_trans->irq_build = pci_sun4v_irq_build;
@@ -940,7 +943,7 @@ static unsigned int sbus_of_build_irq(struct device_node *dp,
void *_data)
{
unsigned long reg_base = (unsigned long) _data;
- struct linux_prom_registers *regs;
+ const struct linux_prom_registers *regs;
unsigned long imap, iclr;
int sbus_slot = 0;
int sbus_level = 0;
@@ -994,7 +997,7 @@ static unsigned int sbus_of_build_irq(struct device_node *dp,
static void sbus_irq_trans_init(struct device_node *dp)
{
- struct linux_prom64_registers *regs;
+ const struct linux_prom64_registers *regs;
dp->irq_trans = prom_early_alloc(sizeof(struct of_irq_controller));
dp->irq_trans->irq_build = sbus_of_build_irq;
@@ -1080,7 +1083,7 @@ static unsigned int sun4v_vdev_irq_build(struct device_node *dp,
static void sun4v_vdev_irq_trans_init(struct device_node *dp)
{
- struct linux_prom64_registers *regs;
+ const struct linux_prom64_registers *regs;
dp->irq_trans = prom_early_alloc(sizeof(struct of_irq_controller));
dp->irq_trans->irq_build = sun4v_vdev_irq_build;
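The prom.c hunks constify the property accessors — of_find_property() and of_get_property() now take a const device_node and hand back const data — and switch name matching to strcasecmp(), which is why callers throughout this diff grow const qualifiers on their property pointers. A hedged sketch of the resulting calling convention (the node pointer and property names here are illustrative):

	const struct linux_prom64_registers *regs;
	const u32 *busrange;
	int len;

	regs = of_get_property(dp, "reg", NULL);	/* returns const void * now */
	busrange = of_get_property(dp, "bus-range", &len);
	if (!regs || !busrange)
		return;					/* property absent on this node */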
diff --git a/arch/sparc64/kernel/sbus.c b/arch/sparc64/kernel/sbus.c
index 01d6d869ea2..3b05428cc90 100644
--- a/arch/sparc64/kernel/sbus.c
+++ b/arch/sparc64/kernel/sbus.c
@@ -24,48 +24,11 @@
#include "iommu_common.h"
-/* These should be allocated on an SMP_CACHE_BYTES
- * aligned boundary for optimal performance.
- *
- * On SYSIO, using an 8K page size we have 1GB of SBUS
- * DMA space mapped. We divide this space into equally
- * sized clusters. We allocate a DMA mapping from the
- * cluster that matches the order of the allocation, or
- * if the order is greater than the number of clusters,
- * we try to allocate from the last cluster.
- */
-
-#define NCLUSTERS 8UL
-#define ONE_GIG (1UL * 1024UL * 1024UL * 1024UL)
-#define CLUSTER_SIZE (ONE_GIG / NCLUSTERS)
-#define CLUSTER_MASK (CLUSTER_SIZE - 1)
-#define CLUSTER_NPAGES (CLUSTER_SIZE >> IO_PAGE_SHIFT)
#define MAP_BASE ((u32)0xc0000000)
-struct sbus_iommu {
-/*0x00*/spinlock_t lock;
-
-/*0x08*/iopte_t *page_table;
-/*0x10*/unsigned long strbuf_regs;
-/*0x18*/unsigned long iommu_regs;
-/*0x20*/unsigned long sbus_control_reg;
-
-/*0x28*/volatile unsigned long strbuf_flushflag;
-
- /* If NCLUSTERS is ever decresed to 4 or lower,
- * you must increase the size of the type of
- * these counters. You have been duly warned. -DaveM
- */
-/*0x30*/struct {
- u16 next;
- u16 flush;
- } alloc_info[NCLUSTERS];
-
- /* The lowest used consistent mapping entry. Since
- * we allocate consistent maps out of cluster 0 this
- * is relative to the beginning of closter 0.
- */
-/*0x50*/u32 lowest_consistent_map;
+struct sbus_info {
+ struct iommu iommu;
+ struct strbuf strbuf;
};
/* Offsets from iommu_regs */
@@ -81,29 +44,17 @@ struct sbus_iommu {
#define IOMMU_DRAM_VALID (1UL << 30UL)
-static void __iommu_flushall(struct sbus_iommu *iommu)
+static void __iommu_flushall(struct iommu *iommu)
{
- unsigned long tag = iommu->iommu_regs + IOMMU_TAGDIAG;
+ unsigned long tag;
int entry;
+ tag = iommu->iommu_control + (IOMMU_TAGDIAG - IOMMU_CONTROL);
for (entry = 0; entry < 16; entry++) {
upa_writeq(0, tag);
tag += 8UL;
}
- upa_readq(iommu->sbus_control_reg);
-
- for (entry = 0; entry < NCLUSTERS; entry++) {
- iommu->alloc_info[entry].flush =
- iommu->alloc_info[entry].next;
- }
-}
-
-static void iommu_flush(struct sbus_iommu *iommu, u32 base, unsigned long npages)
-{
- while (npages--)
- upa_writeq(base + (npages << IO_PAGE_SHIFT),
- iommu->iommu_regs + IOMMU_FLUSH);
- upa_readq(iommu->sbus_control_reg);
+ upa_readq(iommu->write_complete_reg);
}
/* Offsets from strbuf_regs */
@@ -118,15 +69,14 @@ static void iommu_flush(struct sbus_iommu *iommu, u32 base, unsigned long npages
#define STRBUF_TAG_VALID 0x02UL
-static void sbus_strbuf_flush(struct sbus_iommu *iommu, u32 base, unsigned long npages, int direction)
+static void sbus_strbuf_flush(struct iommu *iommu, struct strbuf *strbuf, u32 base, unsigned long npages, int direction)
{
unsigned long n;
int limit;
n = npages;
while (n--)
- upa_writeq(base + (n << IO_PAGE_SHIFT),
- iommu->strbuf_regs + STRBUF_PFLUSH);
+ upa_writeq(base + (n << IO_PAGE_SHIFT), strbuf->strbuf_pflush);
/* If the device could not have possibly put dirty data into
* the streaming cache, no flush-flag synchronization needs
@@ -135,15 +85,14 @@ static void sbus_strbuf_flush(struct sbus_iommu *iommu, u32 base, unsigned long
if (direction == SBUS_DMA_TODEVICE)
return;
- iommu->strbuf_flushflag = 0UL;
+ *(strbuf->strbuf_flushflag) = 0UL;
/* Whoopee cushion! */
- upa_writeq(__pa(&iommu->strbuf_flushflag),
- iommu->strbuf_regs + STRBUF_FSYNC);
- upa_readq(iommu->sbus_control_reg);
+ upa_writeq(strbuf->strbuf_flushflag_pa, strbuf->strbuf_fsync);
+ upa_readq(iommu->write_complete_reg);
limit = 100000;
- while (iommu->strbuf_flushflag == 0UL) {
+ while (*(strbuf->strbuf_flushflag) == 0UL) {
limit--;
if (!limit)
break;
@@ -156,288 +105,247 @@ static void sbus_strbuf_flush(struct sbus_iommu *iommu, u32 base, unsigned long
base, npages);
}
-static iopte_t *alloc_streaming_cluster(struct sbus_iommu *iommu, unsigned long npages)
+/* Based largely upon the ppc64 iommu allocator. */
+static long sbus_arena_alloc(struct iommu *iommu, unsigned long npages)
{
- iopte_t *iopte, *limit, *first, *cluster;
- unsigned long cnum, ent, nent, flush_point, found;
-
- cnum = 0;
- nent = 1;
- while ((1UL << cnum) < npages)
- cnum++;
- if(cnum >= NCLUSTERS) {
- nent = 1UL << (cnum - NCLUSTERS);
- cnum = NCLUSTERS - 1;
- }
- iopte = iommu->page_table + (cnum * CLUSTER_NPAGES);
-
- if (cnum == 0)
- limit = (iommu->page_table +
- iommu->lowest_consistent_map);
- else
- limit = (iopte + CLUSTER_NPAGES);
-
- iopte += ((ent = iommu->alloc_info[cnum].next) << cnum);
- flush_point = iommu->alloc_info[cnum].flush;
-
- first = iopte;
- cluster = NULL;
- found = 0;
- for (;;) {
- if (iopte_val(*iopte) == 0UL) {
- found++;
- if (!cluster)
- cluster = iopte;
+ struct iommu_arena *arena = &iommu->arena;
+ unsigned long n, i, start, end, limit;
+ int pass;
+
+ limit = arena->limit;
+ start = arena->hint;
+ pass = 0;
+
+again:
+ n = find_next_zero_bit(arena->map, limit, start);
+ end = n + npages;
+ if (unlikely(end >= limit)) {
+ if (likely(pass < 1)) {
+ limit = start;
+ start = 0;
+ __iommu_flushall(iommu);
+ pass++;
+ goto again;
} else {
- /* Used cluster in the way */
- cluster = NULL;
- found = 0;
+ /* Scanned the whole thing, give up. */
+ return -1;
}
+ }
- if (found == nent)
- break;
-
- iopte += (1 << cnum);
- ent++;
- if (iopte >= limit) {
- iopte = (iommu->page_table + (cnum * CLUSTER_NPAGES));
- ent = 0;
-
- /* Multiple cluster allocations must not wrap */
- cluster = NULL;
- found = 0;
+ for (i = n; i < end; i++) {
+ if (test_bit(i, arena->map)) {
+ start = i + 1;
+ goto again;
}
- if (ent == flush_point)
- __iommu_flushall(iommu);
- if (iopte == first)
- goto bad;
}
- /* ent/iopte points to the last cluster entry we're going to use,
- * so save our place for the next allocation.
- */
- if ((iopte + (1 << cnum)) >= limit)
- ent = 0;
- else
- ent = ent + 1;
- iommu->alloc_info[cnum].next = ent;
- if (ent == flush_point)
- __iommu_flushall(iommu);
-
- /* I've got your streaming cluster right here buddy boy... */
- return cluster;
-
-bad:
- printk(KERN_EMERG "sbus: alloc_streaming_cluster of npages(%ld) failed!\n",
- npages);
- return NULL;
+ for (i = n; i < end; i++)
+ __set_bit(i, arena->map);
+
+ arena->hint = end;
+
+ return n;
}
-static void free_streaming_cluster(struct sbus_iommu *iommu, u32 base, unsigned long npages)
+static void sbus_arena_free(struct iommu_arena *arena, unsigned long base, unsigned long npages)
{
- unsigned long cnum, ent, nent;
- iopte_t *iopte;
+ unsigned long i;
- cnum = 0;
- nent = 1;
- while ((1UL << cnum) < npages)
- cnum++;
- if(cnum >= NCLUSTERS) {
- nent = 1UL << (cnum - NCLUSTERS);
- cnum = NCLUSTERS - 1;
- }
- ent = (base & CLUSTER_MASK) >> (IO_PAGE_SHIFT + cnum);
- iopte = iommu->page_table + ((base - MAP_BASE) >> IO_PAGE_SHIFT);
- do {
- iopte_val(*iopte) = 0UL;
- iopte += 1 << cnum;
- } while(--nent);
-
- /* If the global flush might not have caught this entry,
- * adjust the flush point such that we will flush before
- * ever trying to reuse it.
- */
-#define between(X,Y,Z) (((Z) - (Y)) >= ((X) - (Y)))
- if (between(ent, iommu->alloc_info[cnum].next, iommu->alloc_info[cnum].flush))
- iommu->alloc_info[cnum].flush = ent;
-#undef between
+ for (i = base; i < (base + npages); i++)
+ __clear_bit(i, arena->map);
}
-/* We allocate consistent mappings from the end of cluster zero. */
-static iopte_t *alloc_consistent_cluster(struct sbus_iommu *iommu, unsigned long npages)
+static void sbus_iommu_table_init(struct iommu *iommu, unsigned int tsbsize)
{
- iopte_t *iopte;
+ unsigned long tsbbase, order, sz, num_tsb_entries;
- iopte = iommu->page_table + (1 * CLUSTER_NPAGES);
- while (iopte > iommu->page_table) {
- iopte--;
- if (!(iopte_val(*iopte) & IOPTE_VALID)) {
- unsigned long tmp = npages;
+ num_tsb_entries = tsbsize / sizeof(iopte_t);
- while (--tmp) {
- iopte--;
- if (iopte_val(*iopte) & IOPTE_VALID)
- break;
- }
- if (tmp == 0) {
- u32 entry = (iopte - iommu->page_table);
+ /* Setup initial software IOMMU state. */
+ spin_lock_init(&iommu->lock);
+ iommu->page_table_map_base = MAP_BASE;
+
+ /* Allocate and initialize the free area map. */
+ sz = num_tsb_entries / 8;
+ sz = (sz + 7UL) & ~7UL;
+ iommu->arena.map = kzalloc(sz, GFP_KERNEL);
+ if (!iommu->arena.map) {
+ prom_printf("SBUS_IOMMU: Error, kmalloc(arena.map) failed.\n");
+ prom_halt();
+ }
+ iommu->arena.limit = num_tsb_entries;
- if (entry < iommu->lowest_consistent_map)
- iommu->lowest_consistent_map = entry;
- return iopte;
- }
- }
+ /* Now allocate and setup the IOMMU page table itself. */
+ order = get_order(tsbsize);
+ tsbbase = __get_free_pages(GFP_KERNEL, order);
+ if (!tsbbase) {
+ prom_printf("IOMMU: Error, gfp(tsb) failed.\n");
+ prom_halt();
}
- return NULL;
+ iommu->page_table = (iopte_t *)tsbbase;
+ memset(iommu->page_table, 0, tsbsize);
}
-static void free_consistent_cluster(struct sbus_iommu *iommu, u32 base, unsigned long npages)
+static inline iopte_t *alloc_npages(struct iommu *iommu, unsigned long npages)
{
- iopte_t *iopte = iommu->page_table + ((base - MAP_BASE) >> IO_PAGE_SHIFT);
+ long entry;
- if ((iopte - iommu->page_table) == iommu->lowest_consistent_map) {
- iopte_t *walk = iopte + npages;
- iopte_t *limit;
+ entry = sbus_arena_alloc(iommu, npages);
+ if (unlikely(entry < 0))
+ return NULL;
- limit = iommu->page_table + CLUSTER_NPAGES;
- while (walk < limit) {
- if (iopte_val(*walk) != 0UL)
- break;
- walk++;
- }
- iommu->lowest_consistent_map =
- (walk - iommu->page_table);
- }
+ return iommu->page_table + entry;
+}
- while (npages--)
- *iopte++ = __iopte(0UL);
+static inline void free_npages(struct iommu *iommu, dma_addr_t base, unsigned long npages)
+{
+ sbus_arena_free(&iommu->arena, base >> IO_PAGE_SHIFT, npages);
}
void *sbus_alloc_consistent(struct sbus_dev *sdev, size_t size, dma_addr_t *dvma_addr)
{
- unsigned long order, first_page, flags;
- struct sbus_iommu *iommu;
+ struct sbus_info *info;
+ struct iommu *iommu;
iopte_t *iopte;
+ unsigned long flags, order, first_page;
void *ret;
int npages;
- if (size <= 0 || sdev == NULL || dvma_addr == NULL)
- return NULL;
-
size = IO_PAGE_ALIGN(size);
order = get_order(size);
if (order >= 10)
return NULL;
+
first_page = __get_free_pages(GFP_KERNEL|__GFP_COMP, order);
if (first_page == 0UL)
return NULL;
memset((char *)first_page, 0, PAGE_SIZE << order);
- iommu = sdev->bus->iommu;
+ info = sdev->bus->iommu;
+ iommu = &info->iommu;
spin_lock_irqsave(&iommu->lock, flags);
- iopte = alloc_consistent_cluster(iommu, size >> IO_PAGE_SHIFT);
- if (iopte == NULL) {
- spin_unlock_irqrestore(&iommu->lock, flags);
+ iopte = alloc_npages(iommu, size >> IO_PAGE_SHIFT);
+ spin_unlock_irqrestore(&iommu->lock, flags);
+
+ if (unlikely(iopte == NULL)) {
free_pages(first_page, order);
return NULL;
}
- /* Ok, we're committed at this point. */
- *dvma_addr = MAP_BASE + ((iopte - iommu->page_table) << IO_PAGE_SHIFT);
+ *dvma_addr = (iommu->page_table_map_base +
+ ((iopte - iommu->page_table) << IO_PAGE_SHIFT));
ret = (void *) first_page;
npages = size >> IO_PAGE_SHIFT;
+ first_page = __pa(first_page);
while (npages--) {
- *iopte++ = __iopte(IOPTE_VALID | IOPTE_CACHE | IOPTE_WRITE |
- (__pa(first_page) & IOPTE_PAGE));
+ iopte_val(*iopte) = (IOPTE_VALID | IOPTE_CACHE |
+ IOPTE_WRITE |
+ (first_page & IOPTE_PAGE));
+ iopte++;
first_page += IO_PAGE_SIZE;
}
- iommu_flush(iommu, *dvma_addr, size >> IO_PAGE_SHIFT);
- spin_unlock_irqrestore(&iommu->lock, flags);
return ret;
}
void sbus_free_consistent(struct sbus_dev *sdev, size_t size, void *cpu, dma_addr_t dvma)
{
- unsigned long order, npages;
- struct sbus_iommu *iommu;
-
- if (size <= 0 || sdev == NULL || cpu == NULL)
- return;
+ struct sbus_info *info;
+ struct iommu *iommu;
+ iopte_t *iopte;
+ unsigned long flags, order, npages;
npages = IO_PAGE_ALIGN(size) >> IO_PAGE_SHIFT;
- iommu = sdev->bus->iommu;
+ info = sdev->bus->iommu;
+ iommu = &info->iommu;
+ iopte = iommu->page_table +
+ ((dvma - iommu->page_table_map_base) >> IO_PAGE_SHIFT);
- spin_lock_irq(&iommu->lock);
- free_consistent_cluster(iommu, dvma, npages);
- iommu_flush(iommu, dvma, npages);
- spin_unlock_irq(&iommu->lock);
+ spin_lock_irqsave(&iommu->lock, flags);
+
+ free_npages(iommu, dvma - iommu->page_table_map_base, npages);
+
+ spin_unlock_irqrestore(&iommu->lock, flags);
order = get_order(size);
if (order < 10)
free_pages((unsigned long)cpu, order);
}
-dma_addr_t sbus_map_single(struct sbus_dev *sdev, void *ptr, size_t size, int dir)
+dma_addr_t sbus_map_single(struct sbus_dev *sdev, void *ptr, size_t sz, int direction)
{
- struct sbus_iommu *iommu = sdev->bus->iommu;
- unsigned long npages, pbase, flags;
- iopte_t *iopte;
- u32 dma_base, offset;
- unsigned long iopte_bits;
-
- if (dir == SBUS_DMA_NONE)
+ struct sbus_info *info;
+ struct iommu *iommu;
+ iopte_t *base;
+ unsigned long flags, npages, oaddr;
+ unsigned long i, base_paddr;
+ u32 bus_addr, ret;
+ unsigned long iopte_protection;
+
+ info = sdev->bus->iommu;
+ iommu = &info->iommu;
+
+ if (unlikely(direction == SBUS_DMA_NONE))
BUG();
- pbase = (unsigned long) ptr;
- offset = (u32) (pbase & ~IO_PAGE_MASK);
- size = (IO_PAGE_ALIGN(pbase + size) - (pbase & IO_PAGE_MASK));
- pbase = (unsigned long) __pa(pbase & IO_PAGE_MASK);
+ oaddr = (unsigned long)ptr;
+ npages = IO_PAGE_ALIGN(oaddr + sz) - (oaddr & IO_PAGE_MASK);
+ npages >>= IO_PAGE_SHIFT;
spin_lock_irqsave(&iommu->lock, flags);
- npages = size >> IO_PAGE_SHIFT;
- iopte = alloc_streaming_cluster(iommu, npages);
- if (iopte == NULL)
- goto bad;
- dma_base = MAP_BASE + ((iopte - iommu->page_table) << IO_PAGE_SHIFT);
- npages = size >> IO_PAGE_SHIFT;
- iopte_bits = IOPTE_VALID | IOPTE_STBUF | IOPTE_CACHE;
- if (dir != SBUS_DMA_TODEVICE)
- iopte_bits |= IOPTE_WRITE;
- while (npages--) {
- *iopte++ = __iopte(iopte_bits | (pbase & IOPTE_PAGE));
- pbase += IO_PAGE_SIZE;
- }
- npages = size >> IO_PAGE_SHIFT;
+ base = alloc_npages(iommu, npages);
spin_unlock_irqrestore(&iommu->lock, flags);
- return (dma_base | offset);
+ if (unlikely(!base))
+ BUG();
-bad:
- spin_unlock_irqrestore(&iommu->lock, flags);
- BUG();
- return 0;
+ bus_addr = (iommu->page_table_map_base +
+ ((base - iommu->page_table) << IO_PAGE_SHIFT));
+ ret = bus_addr | (oaddr & ~IO_PAGE_MASK);
+ base_paddr = __pa(oaddr & IO_PAGE_MASK);
+
+ iopte_protection = IOPTE_VALID | IOPTE_STBUF | IOPTE_CACHE;
+ if (direction != SBUS_DMA_TODEVICE)
+ iopte_protection |= IOPTE_WRITE;
+
+ for (i = 0; i < npages; i++, base++, base_paddr += IO_PAGE_SIZE)
+ iopte_val(*base) = iopte_protection | base_paddr;
+
+ return ret;
}
-void sbus_unmap_single(struct sbus_dev *sdev, dma_addr_t dma_addr, size_t size, int direction)
+void sbus_unmap_single(struct sbus_dev *sdev, dma_addr_t bus_addr, size_t sz, int direction)
{
- struct sbus_iommu *iommu = sdev->bus->iommu;
- u32 dma_base = dma_addr & IO_PAGE_MASK;
- unsigned long flags;
+ struct sbus_info *info = sdev->bus->iommu;
+ struct iommu *iommu = &info->iommu;
+ struct strbuf *strbuf = &info->strbuf;
+ iopte_t *base;
+ unsigned long flags, npages, i;
+
+ if (unlikely(direction == SBUS_DMA_NONE))
+ BUG();
- size = (IO_PAGE_ALIGN(dma_addr + size) - dma_base);
+ npages = IO_PAGE_ALIGN(bus_addr + sz) - (bus_addr & IO_PAGE_MASK);
+ npages >>= IO_PAGE_SHIFT;
+ base = iommu->page_table +
+ ((bus_addr - iommu->page_table_map_base) >> IO_PAGE_SHIFT);
+
+ bus_addr &= IO_PAGE_MASK;
spin_lock_irqsave(&iommu->lock, flags);
- free_streaming_cluster(iommu, dma_base, size >> IO_PAGE_SHIFT);
- sbus_strbuf_flush(iommu, dma_base, size >> IO_PAGE_SHIFT, direction);
+ sbus_strbuf_flush(iommu, strbuf, bus_addr, npages, direction);
+ for (i = 0; i < npages; i++)
+ iopte_val(base[i]) = 0UL;
+ free_npages(iommu, bus_addr - iommu->page_table_map_base, npages);
spin_unlock_irqrestore(&iommu->lock, flags);
}
#define SG_ENT_PHYS_ADDRESS(SG) \
(__pa(page_address((SG)->page)) + (SG)->offset)
-static inline void fill_sg(iopte_t *iopte, struct scatterlist *sg, int nused, int nelems, unsigned long iopte_bits)
+static inline void fill_sg(iopte_t *iopte, struct scatterlist *sg,
+ int nused, int nelems, unsigned long iopte_protection)
{
struct scatterlist *dma_sg = sg;
struct scatterlist *sg_end = sg + nelems;
@@ -462,7 +370,7 @@ static inline void fill_sg(iopte_t *iopte, struct scatterlist *sg, int nused, in
for (;;) {
unsigned long tmp;
- tmp = (unsigned long) SG_ENT_PHYS_ADDRESS(sg);
+ tmp = SG_ENT_PHYS_ADDRESS(sg);
len = sg->length;
if (((tmp ^ pteval) >> IO_PAGE_SHIFT) != 0UL) {
pteval = tmp & IO_PAGE_MASK;
@@ -478,7 +386,7 @@ static inline void fill_sg(iopte_t *iopte, struct scatterlist *sg, int nused, in
sg++;
}
- pteval = ((pteval & IOPTE_PAGE) | iopte_bits);
+ pteval = iopte_protection | (pteval & IOPTE_PAGE);
while (len > 0) {
*iopte++ = __iopte(pteval);
pteval += IO_PAGE_SIZE;
@@ -509,103 +417,121 @@ static inline void fill_sg(iopte_t *iopte, struct scatterlist *sg, int nused, in
}
}
-int sbus_map_sg(struct sbus_dev *sdev, struct scatterlist *sg, int nents, int dir)
+int sbus_map_sg(struct sbus_dev *sdev, struct scatterlist *sglist, int nelems, int direction)
{
- struct sbus_iommu *iommu = sdev->bus->iommu;
- unsigned long flags, npages;
- iopte_t *iopte;
+ struct sbus_info *info;
+ struct iommu *iommu;
+ unsigned long flags, npages, iopte_protection;
+ iopte_t *base;
u32 dma_base;
struct scatterlist *sgtmp;
int used;
- unsigned long iopte_bits;
-
- if (dir == SBUS_DMA_NONE)
- BUG();
/* Fast path single entry scatterlists. */
- if (nents == 1) {
- sg->dma_address =
+ if (nelems == 1) {
+ sglist->dma_address =
sbus_map_single(sdev,
- (page_address(sg->page) + sg->offset),
- sg->length, dir);
- sg->dma_length = sg->length;
+ (page_address(sglist->page) + sglist->offset),
+ sglist->length, direction);
+ sglist->dma_length = sglist->length;
return 1;
}
- npages = prepare_sg(sg, nents);
+ info = sdev->bus->iommu;
+ iommu = &info->iommu;
+
+ if (unlikely(direction == SBUS_DMA_NONE))
+ BUG();
+
+ npages = prepare_sg(sglist, nelems);
spin_lock_irqsave(&iommu->lock, flags);
- iopte = alloc_streaming_cluster(iommu, npages);
- if (iopte == NULL)
- goto bad;
- dma_base = MAP_BASE + ((iopte - iommu->page_table) << IO_PAGE_SHIFT);
+ base = alloc_npages(iommu, npages);
+ spin_unlock_irqrestore(&iommu->lock, flags);
+
+ if (unlikely(base == NULL))
+ BUG();
+
+ dma_base = iommu->page_table_map_base +
+ ((base - iommu->page_table) << IO_PAGE_SHIFT);
/* Normalize DVMA addresses. */
- sgtmp = sg;
- used = nents;
+ used = nelems;
+ sgtmp = sglist;
while (used && sgtmp->dma_length) {
sgtmp->dma_address += dma_base;
sgtmp++;
used--;
}
- used = nents - used;
+ used = nelems - used;
+
+ iopte_protection = IOPTE_VALID | IOPTE_STBUF | IOPTE_CACHE;
+ if (direction != SBUS_DMA_TODEVICE)
+ iopte_protection |= IOPTE_WRITE;
- iopte_bits = IOPTE_VALID | IOPTE_STBUF | IOPTE_CACHE;
- if (dir != SBUS_DMA_TODEVICE)
- iopte_bits |= IOPTE_WRITE;
+ fill_sg(base, sglist, used, nelems, iopte_protection);
- fill_sg(iopte, sg, used, nents, iopte_bits);
#ifdef VERIFY_SG
- verify_sglist(sg, nents, iopte, npages);
+ verify_sglist(sglist, nelems, base, npages);
#endif
- spin_unlock_irqrestore(&iommu->lock, flags);
return used;
-
-bad:
- spin_unlock_irqrestore(&iommu->lock, flags);
- BUG();
- return 0;
}
-void sbus_unmap_sg(struct sbus_dev *sdev, struct scatterlist *sg, int nents, int direction)
+void sbus_unmap_sg(struct sbus_dev *sdev, struct scatterlist *sglist, int nelems, int direction)
{
- unsigned long size, flags;
- struct sbus_iommu *iommu;
- u32 dvma_base;
- int i;
+ struct sbus_info *info;
+ struct iommu *iommu;
+ struct strbuf *strbuf;
+ iopte_t *base;
+ unsigned long flags, i, npages;
+ u32 bus_addr;
+
+ if (unlikely(direction == SBUS_DMA_NONE))
+ BUG();
- /* Fast path single entry scatterlists. */
- if (nents == 1) {
- sbus_unmap_single(sdev, sg->dma_address, sg->dma_length, direction);
- return;
- }
+ info = sdev->bus->iommu;
+ iommu = &info->iommu;
+ strbuf = &info->strbuf;
- dvma_base = sg[0].dma_address & IO_PAGE_MASK;
- for (i = 0; i < nents; i++) {
- if (sg[i].dma_length == 0)
+ bus_addr = sglist->dma_address & IO_PAGE_MASK;
+
+ for (i = 1; i < nelems; i++)
+ if (sglist[i].dma_length == 0)
break;
- }
i--;
- size = IO_PAGE_ALIGN(sg[i].dma_address + sg[i].dma_length) - dvma_base;
+ npages = (IO_PAGE_ALIGN(sglist[i].dma_address + sglist[i].dma_length) -
+ bus_addr) >> IO_PAGE_SHIFT;
+
+ base = iommu->page_table +
+ ((bus_addr - iommu->page_table_map_base) >> IO_PAGE_SHIFT);
- iommu = sdev->bus->iommu;
spin_lock_irqsave(&iommu->lock, flags);
- free_streaming_cluster(iommu, dvma_base, size >> IO_PAGE_SHIFT);
- sbus_strbuf_flush(iommu, dvma_base, size >> IO_PAGE_SHIFT, direction);
+ sbus_strbuf_flush(iommu, strbuf, bus_addr, npages, direction);
+ for (i = 0; i < npages; i++)
+ iopte_val(base[i]) = 0UL;
+ free_npages(iommu, bus_addr - iommu->page_table_map_base, npages);
spin_unlock_irqrestore(&iommu->lock, flags);
}
-void sbus_dma_sync_single_for_cpu(struct sbus_dev *sdev, dma_addr_t base, size_t size, int direction)
+void sbus_dma_sync_single_for_cpu(struct sbus_dev *sdev, dma_addr_t bus_addr, size_t sz, int direction)
{
- struct sbus_iommu *iommu = sdev->bus->iommu;
- unsigned long flags;
+ struct sbus_info *info;
+ struct iommu *iommu;
+ struct strbuf *strbuf;
+ unsigned long flags, npages;
+
+ info = sdev->bus->iommu;
+ iommu = &info->iommu;
+ strbuf = &info->strbuf;
- size = (IO_PAGE_ALIGN(base + size) - (base & IO_PAGE_MASK));
+ npages = IO_PAGE_ALIGN(bus_addr + sz) - (bus_addr & IO_PAGE_MASK);
+ npages >>= IO_PAGE_SHIFT;
+ bus_addr &= IO_PAGE_MASK;
spin_lock_irqsave(&iommu->lock, flags);
- sbus_strbuf_flush(iommu, base & IO_PAGE_MASK, size >> IO_PAGE_SHIFT, direction);
+ sbus_strbuf_flush(iommu, strbuf, bus_addr, npages, direction);
spin_unlock_irqrestore(&iommu->lock, flags);
}
@@ -613,23 +539,29 @@ void sbus_dma_sync_single_for_device(struct sbus_dev *sdev, dma_addr_t base, siz
{
}
-void sbus_dma_sync_sg_for_cpu(struct sbus_dev *sdev, struct scatterlist *sg, int nents, int direction)
+void sbus_dma_sync_sg_for_cpu(struct sbus_dev *sdev, struct scatterlist *sglist, int nelems, int direction)
{
- struct sbus_iommu *iommu = sdev->bus->iommu;
- unsigned long flags, size;
- u32 base;
- int i;
-
- base = sg[0].dma_address & IO_PAGE_MASK;
- for (i = 0; i < nents; i++) {
- if (sg[i].dma_length == 0)
+ struct sbus_info *info;
+ struct iommu *iommu;
+ struct strbuf *strbuf;
+ unsigned long flags, npages, i;
+ u32 bus_addr;
+
+ info = sdev->bus->iommu;
+ iommu = &info->iommu;
+ strbuf = &info->strbuf;
+
+ bus_addr = sglist[0].dma_address & IO_PAGE_MASK;
+ for (i = 0; i < nelems; i++) {
+ if (!sglist[i].dma_length)
break;
}
i--;
- size = IO_PAGE_ALIGN(sg[i].dma_address + sg[i].dma_length) - base;
+ npages = (IO_PAGE_ALIGN(sglist[i].dma_address + sglist[i].dma_length)
+ - bus_addr) >> IO_PAGE_SHIFT;
spin_lock_irqsave(&iommu->lock, flags);
- sbus_strbuf_flush(iommu, base, size >> IO_PAGE_SHIFT, direction);
+ sbus_strbuf_flush(iommu, strbuf, bus_addr, npages, direction);
spin_unlock_irqrestore(&iommu->lock, flags);
}
@@ -640,12 +572,13 @@ void sbus_dma_sync_sg_for_device(struct sbus_dev *sdev, struct scatterlist *sg,
/* Enable 64-bit DVMA mode for the given device. */
void sbus_set_sbus64(struct sbus_dev *sdev, int bursts)
{
- struct sbus_iommu *iommu = sdev->bus->iommu;
+ struct sbus_info *info = sdev->bus->iommu;
+ struct iommu *iommu = &info->iommu;
int slot = sdev->slot;
unsigned long cfg_reg;
u64 val;
- cfg_reg = iommu->sbus_control_reg;
+ cfg_reg = iommu->write_complete_reg;
switch (slot) {
case 0:
cfg_reg += 0x20UL;
@@ -780,8 +713,9 @@ static unsigned long sysio_imap_to_iclr(unsigned long imap)
unsigned int sbus_build_irq(void *buscookie, unsigned int ino)
{
struct sbus_bus *sbus = (struct sbus_bus *)buscookie;
- struct sbus_iommu *iommu = sbus->iommu;
- unsigned long reg_base = iommu->sbus_control_reg - 0x2000UL;
+ struct sbus_info *info = sbus->iommu;
+ struct iommu *iommu = &info->iommu;
+ unsigned long reg_base = iommu->write_complete_reg - 0x2000UL;
unsigned long imap, iclr;
int sbus_level = 0;
@@ -842,8 +776,9 @@ unsigned int sbus_build_irq(void *buscookie, unsigned int ino)
static irqreturn_t sysio_ue_handler(int irq, void *dev_id)
{
struct sbus_bus *sbus = dev_id;
- struct sbus_iommu *iommu = sbus->iommu;
- unsigned long reg_base = iommu->sbus_control_reg - 0x2000UL;
+ struct sbus_info *info = sbus->iommu;
+ struct iommu *iommu = &info->iommu;
+ unsigned long reg_base = iommu->write_complete_reg - 0x2000UL;
unsigned long afsr_reg, afar_reg;
unsigned long afsr, afar, error_bits;
int reported;
@@ -914,8 +849,9 @@ static irqreturn_t sysio_ue_handler(int irq, void *dev_id)
static irqreturn_t sysio_ce_handler(int irq, void *dev_id)
{
struct sbus_bus *sbus = dev_id;
- struct sbus_iommu *iommu = sbus->iommu;
- unsigned long reg_base = iommu->sbus_control_reg - 0x2000UL;
+ struct sbus_info *info = sbus->iommu;
+ struct iommu *iommu = &info->iommu;
+ unsigned long reg_base = iommu->write_complete_reg - 0x2000UL;
unsigned long afsr_reg, afar_reg;
unsigned long afsr, afar, error_bits;
int reported;
@@ -991,12 +927,13 @@ static irqreturn_t sysio_ce_handler(int irq, void *dev_id)
static irqreturn_t sysio_sbus_error_handler(int irq, void *dev_id)
{
struct sbus_bus *sbus = dev_id;
- struct sbus_iommu *iommu = sbus->iommu;
+ struct sbus_info *info = sbus->iommu;
+ struct iommu *iommu = &info->iommu;
unsigned long afsr_reg, afar_reg, reg_base;
unsigned long afsr, afar, error_bits;
int reported;
- reg_base = iommu->sbus_control_reg - 0x2000UL;
+ reg_base = iommu->write_complete_reg - 0x2000UL;
afsr_reg = reg_base + SYSIO_SBUS_AFSR;
afar_reg = reg_base + SYSIO_SBUS_AFAR;
@@ -1058,8 +995,9 @@ static irqreturn_t sysio_sbus_error_handler(int irq, void *dev_id)
static void __init sysio_register_error_handlers(struct sbus_bus *sbus)
{
- struct sbus_iommu *iommu = sbus->iommu;
- unsigned long reg_base = iommu->sbus_control_reg - 0x2000UL;
+ struct sbus_info *info = sbus->iommu;
+ struct iommu *iommu = &info->iommu;
+ unsigned long reg_base = iommu->write_complete_reg - 0x2000UL;
unsigned int irq;
u64 control;
@@ -1093,18 +1031,20 @@ static void __init sysio_register_error_handlers(struct sbus_bus *sbus)
SYSIO_ECNTRL_CEEN),
reg_base + ECC_CONTROL);
- control = upa_readq(iommu->sbus_control_reg);
+ control = upa_readq(iommu->write_complete_reg);
control |= 0x100UL; /* SBUS Error Interrupt Enable */
- upa_writeq(control, iommu->sbus_control_reg);
+ upa_writeq(control, iommu->write_complete_reg);
}
/* Boot time initialization. */
static void __init sbus_iommu_init(int __node, struct sbus_bus *sbus)
{
- struct linux_prom64_registers *pr;
+ const struct linux_prom64_registers *pr;
struct device_node *dp;
- struct sbus_iommu *iommu;
- unsigned long regs, tsb_base;
+ struct sbus_info *info;
+ struct iommu *iommu;
+ struct strbuf *strbuf;
+ unsigned long regs, reg_base;
u64 control;
int i;
@@ -1119,94 +1059,87 @@ static void __init sbus_iommu_init(int __node, struct sbus_bus *sbus)
}
regs = pr->phys_addr;
- iommu = kmalloc(sizeof(*iommu) + SMP_CACHE_BYTES, GFP_ATOMIC);
- if (iommu == NULL) {
- prom_printf("sbus_iommu_init: Fatal error, kmalloc(iommu) failed\n");
+ info = kzalloc(sizeof(*info), GFP_ATOMIC);
+ if (info == NULL) {
+ prom_printf("sbus_iommu_init: Fatal error, "
+ "kmalloc(info) failed\n");
prom_halt();
}
- /* Align on E$ line boundary. */
- iommu = (struct sbus_iommu *)
- (((unsigned long)iommu + (SMP_CACHE_BYTES - 1UL)) &
- ~(SMP_CACHE_BYTES - 1UL));
+ iommu = &info->iommu;
+ strbuf = &info->strbuf;
- memset(iommu, 0, sizeof(*iommu));
+ reg_base = regs + SYSIO_IOMMUREG_BASE;
+ iommu->iommu_control = reg_base + IOMMU_CONTROL;
+ iommu->iommu_tsbbase = reg_base + IOMMU_TSBBASE;
+ iommu->iommu_flush = reg_base + IOMMU_FLUSH;
- /* We start with no consistent mappings. */
- iommu->lowest_consistent_map = CLUSTER_NPAGES;
+ reg_base = regs + SYSIO_STRBUFREG_BASE;
+ strbuf->strbuf_control = reg_base + STRBUF_CONTROL;
+ strbuf->strbuf_pflush = reg_base + STRBUF_PFLUSH;
+ strbuf->strbuf_fsync = reg_base + STRBUF_FSYNC;
- for (i = 0; i < NCLUSTERS; i++) {
- iommu->alloc_info[i].flush = 0;
- iommu->alloc_info[i].next = 0;
- }
+ strbuf->strbuf_enabled = 1;
- /* Setup spinlock. */
- spin_lock_init(&iommu->lock);
-
- /* Init register offsets. */
- iommu->iommu_regs = regs + SYSIO_IOMMUREG_BASE;
- iommu->strbuf_regs = regs + SYSIO_STRBUFREG_BASE;
+ strbuf->strbuf_flushflag = (volatile unsigned long *)
+ ((((unsigned long)&strbuf->__flushflag_buf[0])
+ + 63UL)
+ & ~63UL);
+ strbuf->strbuf_flushflag_pa = (unsigned long)
+ __pa(strbuf->strbuf_flushflag);
/* The SYSIO SBUS control register is used for dummy reads
* in order to ensure write completion.
*/
- iommu->sbus_control_reg = regs + 0x2000UL;
+ iommu->write_complete_reg = regs + 0x2000UL;
/* Link into SYSIO software state. */
- sbus->iommu = iommu;
+ sbus->iommu = info;
printk("SYSIO: UPA portID %x, at %016lx\n",
sbus->portid, regs);
/* Setup for TSB_SIZE=7, TBW_SIZE=0, MMU_DE=1, MMU_EN=1 */
- control = upa_readq(iommu->iommu_regs + IOMMU_CONTROL);
+ sbus_iommu_table_init(iommu, IO_TSB_SIZE);
+
+ control = upa_readq(iommu->iommu_control);
control = ((7UL << 16UL) |
(0UL << 2UL) |
(1UL << 1UL) |
(1UL << 0UL));
-
- /* Using the above configuration we need 1MB iommu page
- * table (128K ioptes * 8 bytes per iopte). This is
- * page order 7 on UltraSparc.
- */
- tsb_base = __get_free_pages(GFP_ATOMIC, get_order(IO_TSB_SIZE));
- if (tsb_base == 0UL) {
- prom_printf("sbus_iommu_init: Fatal error, cannot alloc TSB table.\n");
- prom_halt();
- }
-
- iommu->page_table = (iopte_t *) tsb_base;
- memset(iommu->page_table, 0, IO_TSB_SIZE);
-
- upa_writeq(control, iommu->iommu_regs + IOMMU_CONTROL);
+ upa_writeq(control, iommu->iommu_control);
/* Clean out any cruft in the IOMMU using
* diagnostic accesses.
*/
for (i = 0; i < 16; i++) {
- unsigned long dram = iommu->iommu_regs + IOMMU_DRAMDIAG;
- unsigned long tag = iommu->iommu_regs + IOMMU_TAGDIAG;
+ unsigned long dram, tag;
+
+ dram = iommu->iommu_control + (IOMMU_DRAMDIAG - IOMMU_CONTROL);
+ tag = iommu->iommu_control + (IOMMU_TAGDIAG - IOMMU_CONTROL);
dram += (unsigned long)i * 8UL;
tag += (unsigned long)i * 8UL;
upa_writeq(0, dram);
upa_writeq(0, tag);
}
- upa_readq(iommu->sbus_control_reg);
+ upa_readq(iommu->write_complete_reg);
/* Give the TSB to SYSIO. */
- upa_writeq(__pa(tsb_base), iommu->iommu_regs + IOMMU_TSBBASE);
+ upa_writeq(__pa(iommu->page_table), iommu->iommu_tsbbase);
/* Setup streaming buffer, DE=1 SB_EN=1 */
control = (1UL << 1UL) | (1UL << 0UL);
- upa_writeq(control, iommu->strbuf_regs + STRBUF_CONTROL);
+ upa_writeq(control, strbuf->strbuf_control);
/* Clear out the tags using diagnostics. */
for (i = 0; i < 16; i++) {
unsigned long ptag, ltag;
- ptag = iommu->strbuf_regs + STRBUF_PTAGDIAG;
- ltag = iommu->strbuf_regs + STRBUF_LTAGDIAG;
+ ptag = strbuf->strbuf_control +
+ (STRBUF_PTAGDIAG - STRBUF_CONTROL);
+ ltag = strbuf->strbuf_control +
+ (STRBUF_LTAGDIAG - STRBUF_CONTROL);
ptag += (unsigned long)i * 8UL;
ltag += (unsigned long)i * 8UL;
@@ -1215,9 +1148,9 @@ static void __init sbus_iommu_init(int __node, struct sbus_bus *sbus)
}
/* Enable DVMA arbitration for all devices/slots. */
- control = upa_readq(iommu->sbus_control_reg);
+ control = upa_readq(iommu->write_complete_reg);
control |= 0x3fUL;
- upa_writeq(control, iommu->sbus_control_reg);
+ upa_writeq(control, iommu->write_complete_reg);
/* Now some Xfire specific grot... */
if (this_is_starfire)
@@ -1229,7 +1162,7 @@ static void __init sbus_iommu_init(int __node, struct sbus_bus *sbus)
void sbus_fill_device_irq(struct sbus_dev *sdev)
{
struct device_node *dp = of_find_node_by_phandle(sdev->prom_node);
- struct linux_prom_irqs *irqs;
+ const struct linux_prom_irqs *irqs;
irqs = of_get_property(dp, "interrupts", NULL);
if (!irqs) {
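The sbus.c rewrite above drops the old cluster allocator in favor of the bitmap arena scheme the PCI IOMMU code uses (sbus_arena_alloc(), modelled on the ppc64 allocator): one bit per IOTTE, a moving allocation hint, and a single wrap-around pass that flushes the IOMMU before rescanning from zero. The core idea, reduced to a compilable user-space sketch — a byte-per-slot map stands in for the kernel bitops and flushall() for __iommu_flushall(); none of this is kernel code:

	#include <stdio.h>
	#include <stdlib.h>
	#include <string.h>

	struct arena {
		unsigned char *map;	/* one byte per IOTTE slot      */
		unsigned long limit;	/* number of slots              */
		unsigned long hint;	/* where the next search starts */
	};

	static void flushall(void) { /* stand-in for __iommu_flushall() */ }

	/* Find npages contiguous free slots and mark them used; -1 on failure. */
	static long arena_alloc(struct arena *a, unsigned long npages)
	{
		unsigned long start = a->hint, limit = a->limit, n, i;
		int pass = 0;

	again:
		for (n = start; n + npages <= limit; n++) {
			for (i = n; i < n + npages; i++)
				if (a->map[i])
					break;
			if (i == n + npages) {		/* free run found */
				memset(&a->map[n], 1, npages);
				a->hint = n + npages;
				return (long)n;
			}
			n = i;				/* skip past the used slot */
		}
		if (pass++ < 1) {	/* one retry: flush, then rescan from 0 */
			limit = start;
			start = 0;
			flushall();
			goto again;
		}
		return -1;
	}

	static void arena_free(struct arena *a, unsigned long base, unsigned long npages)
	{
		memset(&a->map[base], 0, npages);
	}

	int main(void)
	{
		struct arena a = { calloc(64, 1), 64, 0 };
		long x = arena_alloc(&a, 8);		/* 0 */
		long y = arena_alloc(&a, 52);		/* 8 */

		arena_free(&a, x, 8);
		/* too little room remains above the hint, so this wraps,
		 * "flushes", and reuses the freed slots at 0 */
		printf("x=%ld y=%ld reused=%ld\n", x, y, arena_alloc(&a, 6));
		free(a.map);
		return 0;
	}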
diff --git a/arch/sparc64/kernel/smp.c b/arch/sparc64/kernel/smp.c
index fc99f7b8012..d4f0a70f484 100644
--- a/arch/sparc64/kernel/smp.c
+++ b/arch/sparc64/kernel/smp.c
@@ -45,7 +45,7 @@
extern void calibrate_delay(void);
/* Please don't make this stuff initdata!!! --DaveM */
-static unsigned char boot_cpu_id;
+unsigned char boot_cpu_id;
cpumask_t cpu_online_map __read_mostly = CPU_MASK_NONE;
cpumask_t phys_cpu_present_map __read_mostly = CPU_MASK_NONE;
@@ -81,8 +81,6 @@ void __init smp_store_cpu_info(int id)
struct device_node *dp;
int def;
- /* multiplier and counter set by
- smp_setup_percpu_timer() */
cpu_data(id).udelay_val = loops_per_jiffy;
cpu_find_by_mid(id, &dp);
@@ -125,7 +123,7 @@ void __init smp_store_cpu_info(int id)
cpu_data(id).ecache_size, cpu_data(id).ecache_line_size);
}
-static void smp_setup_percpu_timer(void);
+extern void setup_sparc64_timer(void);
static volatile unsigned long callin_flag = 0;
@@ -140,7 +138,7 @@ void __init smp_callin(void)
__flush_tlb_all();
- smp_setup_percpu_timer();
+ setup_sparc64_timer();
if (cheetah_pcache_forced_on)
cheetah_enable_pcache();
@@ -177,8 +175,6 @@ void cpu_panic(void)
panic("SMP bolixed\n");
}
-static unsigned long current_tick_offset __read_mostly;
-
/* This tick register synchronization scheme is taken entirely from
* the ia64 port, see arch/ia64/kernel/smpboot.c for details and credit.
*
@@ -261,7 +257,7 @@ void smp_synchronize_tick_client(void)
} else
adj = -delta;
- tick_ops->add_tick(adj, current_tick_offset);
+ tick_ops->add_tick(adj);
}
#if DEBUG_TICK_SYNC
t[i].rt = rt;
@@ -1180,117 +1176,15 @@ void smp_penguin_jailcell(int irq, struct pt_regs *regs)
preempt_enable();
}
-#define prof_multiplier(__cpu) cpu_data(__cpu).multiplier
-#define prof_counter(__cpu) cpu_data(__cpu).counter
-
-void smp_percpu_timer_interrupt(struct pt_regs *regs)
-{
- unsigned long compare, tick, pstate;
- int cpu = smp_processor_id();
- int user = user_mode(regs);
- struct pt_regs *old_regs;
-
- /*
- * Check for level 14 softint.
- */
- {
- unsigned long tick_mask = tick_ops->softint_mask;
-
- if (!(get_softint() & tick_mask)) {
- extern void handler_irq(int, struct pt_regs *);
-
- handler_irq(14, regs);
- return;
- }
- clear_softint(tick_mask);
- }
-
- old_regs = set_irq_regs(regs);
- do {
- profile_tick(CPU_PROFILING);
- if (!--prof_counter(cpu)) {
- irq_enter();
-
- if (cpu == boot_cpu_id) {
- kstat_this_cpu.irqs[0]++;
- timer_tick_interrupt(regs);
- }
-
- update_process_times(user);
-
- irq_exit();
-
- prof_counter(cpu) = prof_multiplier(cpu);
- }
-
- /* Guarantee that the following sequences execute
- * uninterrupted.
- */
- __asm__ __volatile__("rdpr %%pstate, %0\n\t"
- "wrpr %0, %1, %%pstate"
- : "=r" (pstate)
- : "i" (PSTATE_IE));
-
- compare = tick_ops->add_compare(current_tick_offset);
- tick = tick_ops->get_tick();
-
- /* Restore PSTATE_IE. */
- __asm__ __volatile__("wrpr %0, 0x0, %%pstate"
- : /* no outputs */
- : "r" (pstate));
- } while (time_after_eq(tick, compare));
- set_irq_regs(old_regs);
-}
-
-static void __init smp_setup_percpu_timer(void)
-{
- int cpu = smp_processor_id();
- unsigned long pstate;
-
- prof_counter(cpu) = prof_multiplier(cpu) = 1;
-
- /* Guarantee that the following sequences execute
- * uninterrupted.
- */
- __asm__ __volatile__("rdpr %%pstate, %0\n\t"
- "wrpr %0, %1, %%pstate"
- : "=r" (pstate)
- : "i" (PSTATE_IE));
-
- tick_ops->init_tick(current_tick_offset);
-
- /* Restore PSTATE_IE. */
- __asm__ __volatile__("wrpr %0, 0x0, %%pstate"
- : /* no outputs */
- : "r" (pstate));
-}
-
void __init smp_tick_init(void)
{
boot_cpu_id = hard_smp_processor_id();
- current_tick_offset = timer_tick_offset;
-
- prof_counter(boot_cpu_id) = prof_multiplier(boot_cpu_id) = 1;
}
/* /proc/profile writes can call this, don't __init it please. */
-static DEFINE_SPINLOCK(prof_setup_lock);
-
int setup_profiling_timer(unsigned int multiplier)
{
- unsigned long flags;
- int i;
-
- if ((!multiplier) || (timer_tick_offset / multiplier) < 1000)
- return -EINVAL;
-
- spin_lock_irqsave(&prof_setup_lock, flags);
- for_each_possible_cpu(i)
- prof_multiplier(i) = multiplier;
- current_tick_offset = (timer_tick_offset / multiplier);
- spin_unlock_irqrestore(&prof_setup_lock, flags);
-
- return 0;
+ return -EINVAL;
}
static void __init smp_tune_scheduling(void)
diff --git a/arch/sparc64/kernel/sparc64_ksyms.c b/arch/sparc64/kernel/sparc64_ksyms.c
index beffc82a1e8..d00f51a5683 100644
--- a/arch/sparc64/kernel/sparc64_ksyms.c
+++ b/arch/sparc64/kernel/sparc64_ksyms.c
@@ -212,7 +212,6 @@ EXPORT_SYMBOL(insl);
#ifdef CONFIG_PCI
EXPORT_SYMBOL(ebus_chain);
EXPORT_SYMBOL(isa_chain);
-EXPORT_SYMBOL(pci_memspace_mask);
EXPORT_SYMBOL(pci_alloc_consistent);
EXPORT_SYMBOL(pci_free_consistent);
EXPORT_SYMBOL(pci_map_single);
diff --git a/arch/sparc64/kernel/sys32.S b/arch/sparc64/kernel/sys32.S
index c09ab4b9431..010a737908e 100644
--- a/arch/sparc64/kernel/sys32.S
+++ b/arch/sparc64/kernel/sys32.S
@@ -91,7 +91,6 @@ SIGN1(sys32_select, compat_sys_select, %o0)
SIGN1(sys32_mkdir, sys_mkdir, %o1)
SIGN3(sys32_futex, compat_sys_futex, %o1, %o2, %o5)
SIGN1(sys32_sysfs, compat_sys_sysfs, %o0)
-SIGN3(sys32_ipc, compat_sys_ipc, %o1, %o2, %o3)
SIGN2(sys32_sendfile, compat_sys_sendfile, %o0, %o1)
SIGN2(sys32_sendfile64, compat_sys_sendfile64, %o0, %o1)
SIGN1(sys32_prctl, sys_prctl, %o0)
diff --git a/arch/sparc64/kernel/sys_sunos32.c b/arch/sparc64/kernel/sys_sunos32.c
index 4cff95b7b3a..8f7a06e2c7e 100644
--- a/arch/sparc64/kernel/sys_sunos32.c
+++ b/arch/sparc64/kernel/sys_sunos32.c
@@ -871,7 +871,7 @@ asmlinkage s32 sunos_sysconf (int name)
ret = ARG_MAX;
break;
case _SC_CHILD_MAX:
- ret = -1; /* no limit */
+ ret = current->signal->rlim[RLIMIT_NPROC].rlim_cur;
break;
case _SC_CLK_TCK:
ret = HZ;
@@ -880,7 +880,7 @@ asmlinkage s32 sunos_sysconf (int name)
ret = NGROUPS_MAX;
break;
case _SC_OPEN_MAX:
- ret = OPEN_MAX;
+ ret = current->signal->rlim[RLIMIT_NOFILE].rlim_cur;
break;
case _SC_JOB_CONTROL:
ret = 1; /* yes, we do support job control */
diff --git a/arch/sparc64/kernel/systbls.S b/arch/sparc64/kernel/systbls.S
index aaeb5e06735..48c36fe6dc6 100644
--- a/arch/sparc64/kernel/systbls.S
+++ b/arch/sparc64/kernel/systbls.S
@@ -62,7 +62,7 @@ sys_call_table32:
/*200*/ .word sys32_ssetmask, sys_sigsuspend, compat_sys_newlstat, sys_uselib, compat_sys_old_readdir
.word sys32_readahead, sys32_socketcall, sys32_syslog, sys32_lookup_dcookie, sys32_fadvise64
/*210*/ .word sys32_fadvise64_64, sys32_tgkill, sys32_waitpid, sys_swapoff, compat_sys_sysinfo
- .word sys32_ipc, sys32_sigreturn, sys_clone, sys32_ioprio_get, compat_sys_adjtimex
+ .word compat_sys_ipc, sys32_sigreturn, sys_clone, sys32_ioprio_get, compat_sys_adjtimex
/*220*/ .word sys32_sigprocmask, sys_ni_syscall, sys32_delete_module, sys_ni_syscall, sys32_getpgid
.word sys32_bdflush, sys32_sysfs, sys_nis_syscall, sys32_setfsuid16, sys32_setfsgid16
/*230*/ .word sys32_select, compat_sys_time, sys32_splice, compat_sys_stime, compat_sys_statfs64
diff --git a/arch/sparc64/kernel/time.c b/arch/sparc64/kernel/time.c
index f84da4f1b70..259063f41f9 100644
--- a/arch/sparc64/kernel/time.c
+++ b/arch/sparc64/kernel/time.c
@@ -31,6 +31,9 @@
#include <linux/profile.h>
#include <linux/miscdevice.h>
#include <linux/rtc.h>
+#include <linux/kernel_stat.h>
+#include <linux/clockchips.h>
+#include <linux/clocksource.h>
#include <asm/oplib.h>
#include <asm/mostek.h>
@@ -60,6 +63,7 @@ static void __iomem *mstk48t59_regs;
static int set_rtc_mmss(unsigned long);
#define TICK_PRIV_BIT (1UL << 63)
+#define TICKCMP_IRQ_BIT (1UL << 63)
#ifdef CONFIG_SMP
unsigned long profile_pc(struct pt_regs *regs)
@@ -93,21 +97,22 @@ static void tick_disable_protection(void)
: "g2");
}
-static void tick_init_tick(unsigned long offset)
+static void tick_disable_irq(void)
{
- tick_disable_protection();
-
__asm__ __volatile__(
- " rd %%tick, %%g1\n"
- " andn %%g1, %1, %%g1\n"
" ba,pt %%xcc, 1f\n"
- " add %%g1, %0, %%g1\n"
+ " nop\n"
" .align 64\n"
- "1: wr %%g1, 0x0, %%tick_cmpr\n"
+ "1: wr %0, 0x0, %%tick_cmpr\n"
" rd %%tick_cmpr, %%g0"
: /* no outputs */
- : "r" (offset), "r" (TICK_PRIV_BIT)
- : "g1");
+ : "r" (TICKCMP_IRQ_BIT));
+}
+
+static void tick_init_tick(void)
+{
+ tick_disable_protection();
+ tick_disable_irq();
}
static unsigned long tick_get_tick(void)
@@ -121,20 +126,14 @@ static unsigned long tick_get_tick(void)
return ret & ~TICK_PRIV_BIT;
}
-static unsigned long tick_get_compare(void)
+static int tick_add_compare(unsigned long adj)
{
- unsigned long ret;
+ unsigned long orig_tick, new_tick, new_compare;
- __asm__ __volatile__("rd %%tick_cmpr, %0\n\t"
- "mov %0, %0"
- : "=r" (ret));
+ __asm__ __volatile__("rd %%tick, %0"
+ : "=r" (orig_tick));
- return ret;
-}
-
-static unsigned long tick_add_compare(unsigned long adj)
-{
- unsigned long new_compare;
+ orig_tick &= ~TICKCMP_IRQ_BIT;
/* Workaround for Spitfire Errata (#54 I think??), I discovered
* this via Sun BugID 4008234, mentioned in Solaris-2.5.1 patch
@@ -145,44 +144,41 @@ static unsigned long tick_add_compare(unsigned long adj)
* at the start of an I-cache line, and perform a dummy
* read back from %tick_cmpr right after writing to it. -DaveM
*/
- __asm__ __volatile__("rd %%tick_cmpr, %0\n\t"
- "ba,pt %%xcc, 1f\n\t"
- " add %0, %1, %0\n\t"
+ __asm__ __volatile__("ba,pt %%xcc, 1f\n\t"
+ " add %1, %2, %0\n\t"
".align 64\n"
"1:\n\t"
"wr %0, 0, %%tick_cmpr\n\t"
- "rd %%tick_cmpr, %%g0"
- : "=&r" (new_compare)
- : "r" (adj));
+ "rd %%tick_cmpr, %%g0\n\t"
+ : "=r" (new_compare)
+ : "r" (orig_tick), "r" (adj));
- return new_compare;
+ __asm__ __volatile__("rd %%tick, %0"
+ : "=r" (new_tick));
+ new_tick &= ~TICKCMP_IRQ_BIT;
+
+ return ((long)(new_tick - (orig_tick+adj))) > 0L;
}
-static unsigned long tick_add_tick(unsigned long adj, unsigned long offset)
+static unsigned long tick_add_tick(unsigned long adj)
{
- unsigned long new_tick, tmp;
+ unsigned long new_tick;
/* Also need to handle Blackbird bug here too. */
__asm__ __volatile__("rd %%tick, %0\n\t"
- "add %0, %2, %0\n\t"
+ "add %0, %1, %0\n\t"
"wrpr %0, 0, %%tick\n\t"
- "andn %0, %4, %1\n\t"
- "ba,pt %%xcc, 1f\n\t"
- " add %1, %3, %1\n\t"
- ".align 64\n"
- "1:\n\t"
- "wr %1, 0, %%tick_cmpr\n\t"
- "rd %%tick_cmpr, %%g0"
- : "=&r" (new_tick), "=&r" (tmp)
- : "r" (adj), "r" (offset), "r" (TICK_PRIV_BIT));
+ : "=&r" (new_tick)
+ : "r" (adj));
return new_tick;
}
static struct sparc64_tick_ops tick_operations __read_mostly = {
+ .name = "tick",
.init_tick = tick_init_tick,
+ .disable_irq = tick_disable_irq,
.get_tick = tick_get_tick,
- .get_compare = tick_get_compare,
.add_tick = tick_add_tick,
.add_compare = tick_add_compare,
.softint_mask = 1UL << 0,
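
The add_compare() methods above now program the compare register to the current counter value plus the requested delta and return a flag saying whether that deadline had already passed by the time the write landed; sparc64_next_event() later in this patch turns that flag into -ETIME so the clockevents core can retry with a larger delta. A minimal, self-contained sketch of the idiom follows (read_tick(), write_tick_cmpr() and PROGRAMMING_COST are simulated stand-ins for the privileged register accesses, not kernel symbols):

#include <stdio.h>

static unsigned long fake_tick;      /* stands in for %tick / %stick */
static unsigned long fake_tick_cmpr; /* stands in for %tick_cmpr / %asr25 */

/* Pretend the counter advances this much while the compare register
 * is being programmed. */
#define PROGRAMMING_COST 8UL

static unsigned long read_tick(void)
{
	return fake_tick;
}

static void write_tick_cmpr(unsigned long val)
{
	fake_tick_cmpr = val;
	fake_tick += PROGRAMMING_COST;
}

/* Same contract as the add_compare() ops above: nonzero means the
 * requested deadline was already in the past once the write completed,
 * so the caller should report -ETIME and retry with a bigger delta. */
static int add_compare(unsigned long delta)
{
	unsigned long orig = read_tick();

	write_tick_cmpr(orig + delta);

	return (long)(read_tick() - (orig + delta)) > 0L;
}

int main(void)
{
	printf("delta=4:   missed=%d\n", add_compare(4));   /* too short -> 1 */
	printf("delta=100: missed=%d\n", add_compare(100)); /* plenty    -> 0 */
	return 0;
}
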
@@ -190,7 +186,15 @@ static struct sparc64_tick_ops tick_operations __read_mostly = {
struct sparc64_tick_ops *tick_ops __read_mostly = &tick_operations;
-static void stick_init_tick(unsigned long offset)
+static void stick_disable_irq(void)
+{
+ __asm__ __volatile__(
+ "wr %0, 0x0, %%asr25"
+ : /* no outputs */
+ : "r" (TICKCMP_IRQ_BIT));
+}
+
+static void stick_init_tick(void)
{
/* Writes to the %tick and %stick register are not
* allowed on sun4v. The Hypervisor controls that
@@ -198,6 +202,7 @@ static void stick_init_tick(unsigned long offset)
*/
if (tlb_type != hypervisor) {
tick_disable_protection();
+ tick_disable_irq();
/* Let the user get at STICK too. */
__asm__ __volatile__(
@@ -209,14 +214,7 @@ static void stick_init_tick(unsigned long offset)
: "g1", "g2");
}
- __asm__ __volatile__(
- " rd %%asr24, %%g1\n"
- " andn %%g1, %1, %%g1\n"
- " add %%g1, %0, %%g1\n"
- " wr %%g1, 0x0, %%asr25"
- : /* no outputs */
- : "r" (offset), "r" (TICK_PRIV_BIT)
- : "g1");
+ stick_disable_irq();
}
static unsigned long stick_get_tick(void)
@@ -229,49 +227,43 @@ static unsigned long stick_get_tick(void)
return ret & ~TICK_PRIV_BIT;
}
-static unsigned long stick_get_compare(void)
+static unsigned long stick_add_tick(unsigned long adj)
{
- unsigned long ret;
-
- __asm__ __volatile__("rd %%asr25, %0"
- : "=r" (ret));
-
- return ret;
-}
-
-static unsigned long stick_add_tick(unsigned long adj, unsigned long offset)
-{
- unsigned long new_tick, tmp;
+ unsigned long new_tick;
__asm__ __volatile__("rd %%asr24, %0\n\t"
- "add %0, %2, %0\n\t"
+ "add %0, %1, %0\n\t"
"wr %0, 0, %%asr24\n\t"
- "andn %0, %4, %1\n\t"
- "add %1, %3, %1\n\t"
- "wr %1, 0, %%asr25"
- : "=&r" (new_tick), "=&r" (tmp)
- : "r" (adj), "r" (offset), "r" (TICK_PRIV_BIT));
+ : "=&r" (new_tick)
+ : "r" (adj));
return new_tick;
}
-static unsigned long stick_add_compare(unsigned long adj)
+static int stick_add_compare(unsigned long adj)
{
- unsigned long new_compare;
+ unsigned long orig_tick, new_tick;
- __asm__ __volatile__("rd %%asr25, %0\n\t"
- "add %0, %1, %0\n\t"
- "wr %0, 0, %%asr25"
- : "=&r" (new_compare)
- : "r" (adj));
+ __asm__ __volatile__("rd %%asr24, %0"
+ : "=r" (orig_tick));
+ orig_tick &= ~TICKCMP_IRQ_BIT;
+
+ __asm__ __volatile__("wr %0, 0, %%asr25"
+ : /* no outputs */
+ : "r" (orig_tick + adj));
+
+ __asm__ __volatile__("rd %%asr24, %0"
+ : "=r" (new_tick));
+ new_tick &= ~TICKCMP_IRQ_BIT;
- return new_compare;
+ return ((long)(new_tick - (orig_tick+adj))) > 0L;
}
static struct sparc64_tick_ops stick_operations __read_mostly = {
+ .name = "stick",
.init_tick = stick_init_tick,
+ .disable_irq = stick_disable_irq,
.get_tick = stick_get_tick,
- .get_compare = stick_get_compare,
.add_tick = stick_add_tick,
.add_compare = stick_add_compare,
.softint_mask = 1UL << 16,
@@ -320,20 +312,6 @@ static unsigned long __hbird_read_stick(void)
return ret;
}
-static unsigned long __hbird_read_compare(void)
-{
- unsigned long low, high;
- unsigned long addr = HBIRD_STICKCMP_ADDR;
-
- __asm__ __volatile__("ldxa [%2] %3, %0\n\t"
- "add %2, 0x8, %2\n\t"
- "ldxa [%2] %3, %1"
- : "=&r" (low), "=&r" (high), "=&r" (addr)
- : "i" (ASI_PHYS_BYPASS_EC_E), "2" (addr));
-
- return (high << 32UL) | low;
-}
-
static void __hbird_write_stick(unsigned long val)
{
unsigned long low = (val & 0xffffffffUL);
@@ -364,10 +342,13 @@ static void __hbird_write_compare(unsigned long val)
"i" (ASI_PHYS_BYPASS_EC_E));
}
-static void hbtick_init_tick(unsigned long offset)
+static void hbtick_disable_irq(void)
{
- unsigned long val;
+ __hbird_write_compare(TICKCMP_IRQ_BIT);
+}
+static void hbtick_init_tick(void)
+{
tick_disable_protection();
/* XXX This seems to be necessary to 'jumpstart' Hummingbird
@@ -377,8 +358,7 @@ static void hbtick_init_tick(unsigned long offset)
*/
__hbird_write_stick(__hbird_read_stick());
- val = __hbird_read_stick() & ~TICK_PRIV_BIT;
- __hbird_write_compare(val + offset);
+ hbtick_disable_irq();
}
static unsigned long hbtick_get_tick(void)
@@ -386,122 +366,95 @@ static unsigned long hbtick_get_tick(void)
return __hbird_read_stick() & ~TICK_PRIV_BIT;
}
-static unsigned long hbtick_get_compare(void)
-{
- return __hbird_read_compare();
-}
-
-static unsigned long hbtick_add_tick(unsigned long adj, unsigned long offset)
+static unsigned long hbtick_add_tick(unsigned long adj)
{
unsigned long val;
val = __hbird_read_stick() + adj;
__hbird_write_stick(val);
- val &= ~TICK_PRIV_BIT;
- __hbird_write_compare(val + offset);
-
return val;
}
-static unsigned long hbtick_add_compare(unsigned long adj)
+static int hbtick_add_compare(unsigned long adj)
{
- unsigned long val = __hbird_read_compare() + adj;
+ unsigned long val = __hbird_read_stick();
+ unsigned long val2;
- val &= ~TICK_PRIV_BIT;
+ val &= ~TICKCMP_IRQ_BIT;
+ val += adj;
__hbird_write_compare(val);
- return val;
+ val2 = __hbird_read_stick() & ~TICKCMP_IRQ_BIT;
+
+ return ((long)(val2 - val)) > 0L;
}
static struct sparc64_tick_ops hbtick_operations __read_mostly = {
+ .name = "hbtick",
.init_tick = hbtick_init_tick,
+ .disable_irq = hbtick_disable_irq,
.get_tick = hbtick_get_tick,
- .get_compare = hbtick_get_compare,
.add_tick = hbtick_add_tick,
.add_compare = hbtick_add_compare,
.softint_mask = 1UL << 0,
};
-/* timer_interrupt() needs to keep up the real-time clock,
- * as well as call the "do_timer()" routine every clocktick
- *
- * NOTE: On SUN5 systems the ticker interrupt comes in using 2
- * interrupts, one at level14 and one with softint bit 0.
- */
-unsigned long timer_tick_offset __read_mostly;
-
static unsigned long timer_ticks_per_nsec_quotient __read_mostly;
#define TICK_SIZE (tick_nsec / 1000)
-static inline void timer_check_rtc(void)
-{
- /* last time the cmos clock got updated */
- static long last_rtc_update;
-
- /* Determine when to update the Mostek clock. */
- if (ntp_synced() &&
- xtime.tv_sec > last_rtc_update + 660 &&
- (xtime.tv_nsec / 1000) >= 500000 - ((unsigned) TICK_SIZE) / 2 &&
- (xtime.tv_nsec / 1000) <= 500000 + ((unsigned) TICK_SIZE) / 2) {
- if (set_rtc_mmss(xtime.tv_sec) == 0)
- last_rtc_update = xtime.tv_sec;
- else
- last_rtc_update = xtime.tv_sec - 600;
- /* do it again in 60 s */
- }
-}
+#define USEC_AFTER 500000
+#define USEC_BEFORE 500000
-irqreturn_t timer_interrupt(int irq, void *dev_id)
-{
- unsigned long ticks, compare, pstate;
+static void sync_cmos_clock(unsigned long dummy);
- write_seqlock(&xtime_lock);
+static DEFINE_TIMER(sync_cmos_timer, sync_cmos_clock, 0, 0);
- do {
-#ifndef CONFIG_SMP
- profile_tick(CPU_PROFILING);
- update_process_times(user_mode(get_irq_regs()));
-#endif
- do_timer(1);
+static void sync_cmos_clock(unsigned long dummy)
+{
+ struct timeval now, next;
+ int fail = 1;
- /* Guarantee that the following sequences execute
- * uninterrupted.
+ /*
+ * If we have an externally synchronized Linux clock, then update
+ * CMOS clock accordingly every ~11 minutes. Set_rtc_mmss() has to be
+ * called as close as possible to 500 ms before the new second starts.
+ * This code is run on a timer. If the clock is set, that timer
+ * may not expire at the correct time. Thus, we adjust...
+ */
+ if (!ntp_synced())
+ /*
+ * Not synced, exit, do not restart a timer (if one is
+ * running, let it run out).
*/
- __asm__ __volatile__("rdpr %%pstate, %0\n\t"
- "wrpr %0, %1, %%pstate"
- : "=r" (pstate)
- : "i" (PSTATE_IE));
+ return;
- compare = tick_ops->add_compare(timer_tick_offset);
- ticks = tick_ops->get_tick();
+ do_gettimeofday(&now);
+ if (now.tv_usec >= USEC_AFTER - ((unsigned) TICK_SIZE) / 2 &&
+ now.tv_usec <= USEC_BEFORE + ((unsigned) TICK_SIZE) / 2)
+ fail = set_rtc_mmss(now.tv_sec);
- /* Restore PSTATE_IE. */
- __asm__ __volatile__("wrpr %0, 0x0, %%pstate"
- : /* no outputs */
- : "r" (pstate));
- } while (time_after_eq(ticks, compare));
+ next.tv_usec = USEC_AFTER - now.tv_usec;
+ if (next.tv_usec <= 0)
+ next.tv_usec += USEC_PER_SEC;
- timer_check_rtc();
+ if (!fail)
+ next.tv_sec = 659;
+ else
+ next.tv_sec = 0;
- write_sequnlock(&xtime_lock);
-
- return IRQ_HANDLED;
+ if (next.tv_usec >= USEC_PER_SEC) {
+ next.tv_sec++;
+ next.tv_usec -= USEC_PER_SEC;
+ }
+ mod_timer(&sync_cmos_timer, jiffies + timeval_to_jiffies(&next));
}
-#ifdef CONFIG_SMP
-void timer_tick_interrupt(struct pt_regs *regs)
+void notify_arch_cmos_timer(void)
{
- write_seqlock(&xtime_lock);
-
- do_timer(1);
-
- timer_check_rtc();
-
- write_sequnlock(&xtime_lock);
+ mod_timer(&sync_cmos_timer, jiffies + 1);
}
-#endif
/* Kick start a stopped clock (procedure from the Sun NVRAM/hostid FAQ). */
static void __init kick_start_clock(void)
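
sync_cmos_clock() above replaces the per-tick timer_check_rtc() check: it reschedules itself so each attempt lands roughly 500 ms after a second boundary, waiting about 11 minutes after a successful RTC write and retrying within the next second after a failure. A small self-contained sketch of just that scheduling arithmetic (next_sync_delay() and its inputs are illustrative, not kernel code):

#include <stdio.h>

#define USEC_PER_SEC 1000000L
#define USEC_AFTER   500000L   /* aim ~500 ms into a second */

/* Compute the delay until the next RTC-sync attempt, mirroring the
 * arithmetic in sync_cmos_clock(). */
static void next_sync_delay(int ok, long now_usec,
			    long *delay_sec, long *delay_usec)
{
	long usec = USEC_AFTER - now_usec;
	long sec;

	if (usec <= 0)
		usec += USEC_PER_SEC;

	sec = ok ? 659 : 0;

	if (usec >= USEC_PER_SEC) {
		sec++;
		usec -= USEC_PER_SEC;
	}

	*delay_sec = sec;
	*delay_usec = usec;
}

int main(void)
{
	long s, us;

	next_sync_delay(1, 750000, &s, &us);
	printf("success at .750s -> retry in %ld.%06lds\n", s, us);

	next_sync_delay(0, 100000, &s, &us);
	printf("failure at .100s -> retry in %ld.%06lds\n", s, us);
	return 0;
}
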
@@ -751,7 +704,7 @@ retry:
return -EOPNOTSUPP;
}
-static int __init clock_model_matches(char *model)
+static int __init clock_model_matches(const char *model)
{
if (strcmp(model, "mk48t02") &&
strcmp(model, "mk48t08") &&
@@ -768,7 +721,7 @@ static int __init clock_model_matches(char *model)
static int __devinit clock_probe(struct of_device *op, const struct of_device_id *match)
{
struct device_node *dp = op->node;
- char *model = of_get_property(dp, "model", NULL);
+ const char *model = of_get_property(dp, "model", NULL);
unsigned long size, flags;
void __iomem *regs;
@@ -900,7 +853,6 @@ static unsigned long sparc64_init_timers(void)
prop = of_find_property(dp, "stick-frequency", NULL);
}
clock = *(unsigned int *) prop->value;
- timer_tick_offset = clock / HZ;
#ifdef CONFIG_SMP
smp_tick_init();
@@ -909,26 +861,6 @@ static unsigned long sparc64_init_timers(void)
return clock;
}
-static void sparc64_start_timers(void)
-{
- unsigned long pstate;
-
- /* Guarantee that the following sequences execute
- * uninterrupted.
- */
- __asm__ __volatile__("rdpr %%pstate, %0\n\t"
- "wrpr %0, %1, %%pstate"
- : "=r" (pstate)
- : "i" (PSTATE_IE));
-
- tick_ops->init_tick(timer_tick_offset);
-
- /* Restore PSTATE_IE. */
- __asm__ __volatile__("wrpr %0, 0x0, %%pstate"
- : /* no outputs */
- : "r" (pstate));
-}
-
struct freq_table {
unsigned long clock_tick_ref;
unsigned int ref_freq;
@@ -975,29 +907,148 @@ static struct notifier_block sparc64_cpufreq_notifier_block = {
#endif /* CONFIG_CPU_FREQ */
-static struct time_interpolator sparc64_cpu_interpolator = {
- .source = TIME_SOURCE_CPU,
- .shift = 16,
- .mask = 0xffffffffffffffffLL
+static int sparc64_next_event(unsigned long delta,
+ struct clock_event_device *evt)
+{
+ return tick_ops->add_compare(delta) ? -ETIME : 0;
+}
+
+static void sparc64_timer_setup(enum clock_event_mode mode,
+ struct clock_event_device *evt)
+{
+ switch (mode) {
+ case CLOCK_EVT_MODE_ONESHOT:
+ break;
+
+ case CLOCK_EVT_MODE_SHUTDOWN:
+ tick_ops->disable_irq();
+ break;
+
+ case CLOCK_EVT_MODE_PERIODIC:
+ case CLOCK_EVT_MODE_UNUSED:
+ WARN_ON(1);
+ break;
+ };
+}
+
+static struct clock_event_device sparc64_clockevent = {
+ .features = CLOCK_EVT_FEAT_ONESHOT,
+ .set_mode = sparc64_timer_setup,
+ .set_next_event = sparc64_next_event,
+ .rating = 100,
+ .shift = 30,
+ .irq = -1,
};
+static DEFINE_PER_CPU(struct clock_event_device, sparc64_events);
-/* The quotient formula is taken from the IA64 port. */
-#define SPARC64_NSEC_PER_CYC_SHIFT 10UL
-void __init time_init(void)
+void timer_interrupt(int irq, struct pt_regs *regs)
{
- unsigned long clock = sparc64_init_timers();
+ struct pt_regs *old_regs = set_irq_regs(regs);
+ unsigned long tick_mask = tick_ops->softint_mask;
+ int cpu = smp_processor_id();
+ struct clock_event_device *evt = &per_cpu(sparc64_events, cpu);
+
+ clear_softint(tick_mask);
+
+ irq_enter();
- sparc64_cpu_interpolator.frequency = clock;
- register_time_interpolator(&sparc64_cpu_interpolator);
+ kstat_this_cpu.irqs[0]++;
- /* Now that the interpolator is registered, it is
- * safe to start the timer ticking.
+ if (unlikely(!evt->event_handler)) {
+ printk(KERN_WARNING
+ "Spurious SPARC64 timer interrupt on cpu %d\n", cpu);
+ } else
+ evt->event_handler(evt);
+
+ irq_exit();
+
+ set_irq_regs(old_regs);
+}
+
+void __devinit setup_sparc64_timer(void)
+{
+ struct clock_event_device *sevt;
+ unsigned long pstate;
+
+ /* Guarantee that the following sequences execute
+ * uninterrupted.
*/
- sparc64_start_timers();
+ __asm__ __volatile__("rdpr %%pstate, %0\n\t"
+ "wrpr %0, %1, %%pstate"
+ : "=r" (pstate)
+ : "i" (PSTATE_IE));
+
+ tick_ops->init_tick();
+
+ /* Restore PSTATE_IE. */
+ __asm__ __volatile__("wrpr %0, 0x0, %%pstate"
+ : /* no outputs */
+ : "r" (pstate));
+
+ sevt = &__get_cpu_var(sparc64_events);
+
+ memcpy(sevt, &sparc64_clockevent, sizeof(*sevt));
+ sevt->cpumask = cpumask_of_cpu(smp_processor_id());
+
+ clockevents_register_device(sevt);
+}
+
+#define SPARC64_NSEC_PER_CYC_SHIFT 32UL
+
+static struct clocksource clocksource_tick = {
+ .rating = 100,
+ .mask = CLOCKSOURCE_MASK(64),
+ .shift = 16,
+ .flags = CLOCK_SOURCE_IS_CONTINUOUS,
+};
+
+static void __init setup_clockevent_multiplier(unsigned long hz)
+{
+ unsigned long mult, shift = 32;
+
+ while (1) {
+ mult = div_sc(hz, NSEC_PER_SEC, shift);
+ if (mult && (mult >> 32UL) == 0UL)
+ break;
+
+ shift--;
+ }
+
+ sparc64_clockevent.shift = shift;
+ sparc64_clockevent.mult = mult;
+}
+
+void __init time_init(void)
+{
+ unsigned long clock = sparc64_init_timers();
timer_ticks_per_nsec_quotient =
- (((NSEC_PER_SEC << SPARC64_NSEC_PER_CYC_SHIFT) +
- (clock / 2)) / clock);
+ clocksource_hz2mult(clock, SPARC64_NSEC_PER_CYC_SHIFT);
+
+ clocksource_tick.name = tick_ops->name;
+ clocksource_tick.mult =
+ clocksource_hz2mult(clock,
+ clocksource_tick.shift);
+ clocksource_tick.read = tick_ops->get_tick;
+
+ printk("clocksource: mult[%x] shift[%d]\n",
+ clocksource_tick.mult, clocksource_tick.shift);
+
+ clocksource_register(&clocksource_tick);
+
+ sparc64_clockevent.name = tick_ops->name;
+
+ setup_clockevent_multiplier(clock);
+
+ sparc64_clockevent.max_delta_ns =
+ clockevent_delta2ns(0x7fffffffffffffff, &sparc64_clockevent);
+ sparc64_clockevent.min_delta_ns =
+ clockevent_delta2ns(0xF, &sparc64_clockevent);
+
+ printk("clockevent: mult[%lx] shift[%d]\n",
+ sparc64_clockevent.mult, sparc64_clockevent.shift);
+
+ setup_sparc64_timer();
#ifdef CONFIG_CPU_FREQ
cpufreq_register_notifier(&sparc64_cpufreq_notifier_block,
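
setup_clockevent_multiplier() above searches for the largest shift (starting at 32) whose multiplier still fits in 32 bits, so nanosecond deltas can be converted to %tick cycles as (ns * mult) >> shift without overflowing. A standalone sketch of that search (div_sc64() and pick_mult_shift() are stand-ins for the kernel's div_sc() helper and the function above):

#include <stdio.h>
#include <stdint.h>

#define NSEC_PER_SEC 1000000000ULL

/* Stand-in for div_sc(): mult = (hz << shift) / NSEC_PER_SEC, so that
 * cycles ~= (nanoseconds * mult) >> shift. */
static uint64_t div_sc64(uint64_t hz, uint64_t nsec, unsigned int shift)
{
	return (hz << shift) / nsec;
}

/* Pick the largest shift (<= 32) whose multiplier fits in 32 bits. */
static void pick_mult_shift(uint64_t hz, uint32_t *mult_out,
			    unsigned int *shift_out)
{
	unsigned int shift = 32;
	uint64_t mult;

	while (1) {
		mult = div_sc64(hz, NSEC_PER_SEC, shift);
		if (mult && (mult >> 32) == 0)
			break;
		shift--;
	}

	*mult_out = (uint32_t)mult;
	*shift_out = shift;
}

int main(void)
{
	uint32_t mult;
	unsigned int shift;

	/* e.g. a 1 GHz %tick: shift 32 would need mult == 2^32, so the
	 * loop settles on shift 31, mult 2^31. */
	pick_mult_shift(1000000000ULL, &mult, &shift);
	printf("1 GHz -> mult=%#x shift=%u\n", (unsigned int)mult, shift);
	return 0;
}
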
@@ -1126,10 +1177,6 @@ static int set_rtc_mmss(unsigned long nowtime)
#define RTC_IS_OPEN 0x01 /* means /dev/rtc is in use */
static unsigned char mini_rtc_status; /* bitmapped status byte. */
-/* months start at 0 now */
-static unsigned char days_in_mo[] =
-{31, 28, 31, 30, 31, 30, 31, 31, 30, 31, 30, 31};
-
#define FEBRUARY 2
#define STARTOFTIME 1970
#define SECDAY 86400L
@@ -1278,8 +1325,7 @@ static int mini_rtc_ioctl(struct inode *inode, struct file *file,
case RTC_SET_TIME: /* Set the RTC */
{
- int year;
- unsigned char leap_yr;
+ int year, days;
if (!capable(CAP_SYS_TIME))
return -EACCES;
@@ -1288,14 +1334,14 @@ static int mini_rtc_ioctl(struct inode *inode, struct file *file,
return -EFAULT;
year = wtime.tm_year + 1900;
- leap_yr = ((!(year % 4) && (year % 100)) ||
- !(year % 400));
+ days = month_days[wtime.tm_mon] +
+ ((wtime.tm_mon == 1) && leapyear(year));
- if ((wtime.tm_mon < 0 || wtime.tm_mon > 11) || (wtime.tm_mday < 1))
+ if ((wtime.tm_mon < 0 || wtime.tm_mon > 11) ||
+ (wtime.tm_mday < 1))
return -EINVAL;
- if (wtime.tm_mday < 0 || wtime.tm_mday >
- (days_in_mo[wtime.tm_mon] + ((wtime.tm_mon == 1) && leap_yr)))
+ if (wtime.tm_mday < 0 || wtime.tm_mday > days)
return -EINVAL;
if (wtime.tm_hour < 0 || wtime.tm_hour >= 24 ||
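
The RTC_SET_TIME validation above now derives the day-of-month limit from month_days[] plus a February leap-day adjustment; both month_days[] and leapyear() are defined elsewhere in time.c, outside this hunk. A self-contained sketch of that check, with the table and leapyear() reimplemented locally for illustration:

#include <stdio.h>

/* Days per month, January == index 0, mirroring the month_days[] table
 * the patch switches to (local reimplementation, not the kernel's). */
static const int month_days[12] = {
	31, 28, 31, 30, 31, 30, 31, 31, 30, 31, 30, 31
};

static int leapyear(int year)
{
	return (!(year % 4) && (year % 100)) || !(year % 400);
}

/* Upper bound used to validate tm_mday: February gains a day in leap years. */
static int days_in_month(int tm_mon, int year)
{
	return month_days[tm_mon] + ((tm_mon == 1) && leapyear(year));
}

int main(void)
{
	printf("Feb 2000: %d days\n", days_in_month(1, 2000)); /* 29 */
	printf("Feb 1900: %d days\n", days_in_month(1, 1900)); /* 28 */
	return 0;
}
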
diff --git a/arch/sparc64/kernel/ttable.S b/arch/sparc64/kernel/ttable.S
index d7d2a8bdc66..7575aa371da 100644
--- a/arch/sparc64/kernel/ttable.S
+++ b/arch/sparc64/kernel/ttable.S
@@ -60,11 +60,7 @@ tl0_irq4: BTRAP(0x44)
tl0_irq5: TRAP_IRQ(handler_irq, 5)
tl0_irq6: BTRAP(0x46) BTRAP(0x47) BTRAP(0x48) BTRAP(0x49)
tl0_irq10: BTRAP(0x4a) BTRAP(0x4b) BTRAP(0x4c) BTRAP(0x4d)
-#ifndef CONFIG_SMP
-tl0_irq14: TRAP_IRQ(timer_irq, 14)
-#else
-tl0_irq14: TICK_SMP_IRQ
-#endif
+tl0_irq14: TRAP_IRQ(timer_interrupt, 14)
tl0_irq15: TRAP_IRQ(handler_irq, 15)
tl0_resv050: BTRAP(0x50) BTRAP(0x51) BTRAP(0x52) BTRAP(0x53) BTRAP(0x54) BTRAP(0x55)
tl0_resv056: BTRAP(0x56) BTRAP(0x57) BTRAP(0x58) BTRAP(0x59) BTRAP(0x5a) BTRAP(0x5b)
diff --git a/arch/sparc64/mm/init.c b/arch/sparc64/mm/init.c
index f146071a4b2..cafadcbcdf3 100644
--- a/arch/sparc64/mm/init.c
+++ b/arch/sparc64/mm/init.c
@@ -122,24 +122,19 @@ static void __init read_obp_memory(const char *property,
size = 0UL;
base = new_base;
}
- regs[i].phys_addr = base;
- regs[i].reg_size = size;
- }
-
- for (i = 0; i < ents; i++) {
- if (regs[i].reg_size == 0UL) {
- int j;
-
- for (j = i; j < ents - 1; j++) {
- regs[j].phys_addr =
- regs[j+1].phys_addr;
- regs[j].reg_size =
- regs[j+1].reg_size;
- }
-
- ents--;
+ if (size == 0UL) {
+ /* If it is empty, simply get rid of it.
+ * This simplifies the logic of the other
+ * functions that process these arrays.
+ */
+ memmove(&regs[i], &regs[i + 1],
+ (ents - i - 1) * sizeof(regs[0]));
i--;
+ ents--;
+ continue;
}
+ regs[i].phys_addr = base;
+ regs[i].reg_size = size;
}
*num_ents = ents;
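
The reworked read_obp_memory() loop above drops zero-size entries in place with memmove(), stepping the index back so the entry shifted into the current slot is examined on the next pass. A standalone sketch of that compaction idiom (struct mem_reg and drop_empty_regs() are illustrative names, not kernel symbols):

#include <stdio.h>
#include <string.h>

struct mem_reg {
	unsigned long phys_addr;
	unsigned long reg_size;
};

/* Remove zero-size entries in place, returning the new entry count. */
static int drop_empty_regs(struct mem_reg *regs, int ents)
{
	int i;

	for (i = 0; i < ents; i++) {
		if (regs[i].reg_size != 0UL)
			continue;

		memmove(&regs[i], &regs[i + 1],
			(ents - i - 1) * sizeof(regs[0]));
		i--;
		ents--;
	}

	return ents;
}

int main(void)
{
	struct mem_reg regs[] = {
		{ 0x00000000, 0x4000000 },
		{ 0x10000000, 0x0       },
		{ 0x20000000, 0x0       },
		{ 0x30000000, 0x2000000 },
	};
	int i, n = drop_empty_regs(regs, 4);

	for (i = 0; i < n; i++)
		printf("%#lx: %#lx bytes\n", regs[i].phys_addr, regs[i].reg_size);
	return 0;
}
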
@@ -154,15 +149,6 @@ unsigned long *sparc64_valid_addr_bitmap __read_mostly;
unsigned long kern_base __read_mostly;
unsigned long kern_size __read_mostly;
-/* get_new_mmu_context() uses "cache + 1". */
-DEFINE_SPINLOCK(ctx_alloc_lock);
-unsigned long tlb_context_cache = CTX_FIRST_VERSION - 1;
-#define CTX_BMAP_SLOTS (1UL << (CTX_NR_BITS - 6))
-unsigned long mmu_context_bmap[CTX_BMAP_SLOTS];
-
-/* References to special section boundaries */
-extern char _start[], _end[];
-
/* Initial ramdisk setup */
extern unsigned long sparc_ramdisk_image64;
extern unsigned int sparc_ramdisk_image;
@@ -406,19 +392,70 @@ void __kprobes flush_icache_range(unsigned long start, unsigned long end)
if (tlb_type == spitfire) {
unsigned long kaddr;
- for (kaddr = start; kaddr < end; kaddr += PAGE_SIZE)
- __flush_icache_page(__get_phys(kaddr));
+ /* This code only runs on Spitfire cpus so this is
+ * why we can assume _PAGE_PADDR_4U.
+ */
+ for (kaddr = start; kaddr < end; kaddr += PAGE_SIZE) {
+ unsigned long paddr, mask = _PAGE_PADDR_4U;
+
+ if (kaddr >= PAGE_OFFSET)
+ paddr = kaddr & mask;
+ else {
+ pgd_t *pgdp = pgd_offset_k(kaddr);
+ pud_t *pudp = pud_offset(pgdp, kaddr);
+ pmd_t *pmdp = pmd_offset(pudp, kaddr);
+ pte_t *ptep = pte_offset_kernel(pmdp, kaddr);
+
+ paddr = pte_val(*ptep) & mask;
+ }
+ __flush_icache_page(paddr);
+ }
}
}
void show_mem(void)
{
- printk("Mem-info:\n");
+ unsigned long total = 0, reserved = 0;
+ unsigned long shared = 0, cached = 0;
+ pg_data_t *pgdat;
+
+ printk(KERN_INFO "Mem-info:\n");
show_free_areas();
- printk("Free swap: %6ldkB\n",
+ printk(KERN_INFO "Free swap: %6ldkB\n",
nr_swap_pages << (PAGE_SHIFT-10));
- printk("%ld pages of RAM\n", num_physpages);
- printk("%lu free pages\n", nr_free_pages());
+ for_each_online_pgdat(pgdat) {
+ unsigned long i, flags;
+
+ pgdat_resize_lock(pgdat, &flags);
+ for (i = 0; i < pgdat->node_spanned_pages; i++) {
+ struct page *page = pgdat_page_nr(pgdat, i);
+ total++;
+ if (PageReserved(page))
+ reserved++;
+ else if (PageSwapCache(page))
+ cached++;
+ else if (page_count(page))
+ shared += page_count(page) - 1;
+ }
+ pgdat_resize_unlock(pgdat, &flags);
+ }
+
+ printk(KERN_INFO "%lu pages of RAM\n", total);
+ printk(KERN_INFO "%lu reserved pages\n", reserved);
+ printk(KERN_INFO "%lu pages shared\n", shared);
+ printk(KERN_INFO "%lu pages swap cached\n", cached);
+
+ printk(KERN_INFO "%lu pages dirty\n",
+ global_page_state(NR_FILE_DIRTY));
+ printk(KERN_INFO "%lu pages writeback\n",
+ global_page_state(NR_WRITEBACK));
+ printk(KERN_INFO "%lu pages mapped\n",
+ global_page_state(NR_FILE_MAPPED));
+ printk(KERN_INFO "%lu pages slab\n",
+ global_page_state(NR_SLAB_RECLAIMABLE) +
+ global_page_state(NR_SLAB_UNRECLAIMABLE));
+ printk(KERN_INFO "%lu pages pagetables\n",
+ global_page_state(NR_PAGETABLE));
}
void mmu_info(struct seq_file *m)
@@ -658,6 +695,13 @@ void __flush_dcache_range(unsigned long start, unsigned long end)
}
#endif /* DCACHE_ALIASING_POSSIBLE */
+/* get_new_mmu_context() uses "cache + 1". */
+DEFINE_SPINLOCK(ctx_alloc_lock);
+unsigned long tlb_context_cache = CTX_FIRST_VERSION - 1;
+#define MAX_CTX_NR (1UL << CTX_NR_BITS)
+#define CTX_BMAP_SLOTS BITS_TO_LONGS(MAX_CTX_NR)
+DECLARE_BITMAP(mmu_context_bmap, MAX_CTX_NR);
+
/* Caller does TLB context flushing on local CPU if necessary.
* The caller also ensures that CTX_VALID(mm->context) is false.
*
@@ -717,95 +761,6 @@ out:
smp_new_mmu_context_version();
}
-void sparc_ultra_dump_itlb(void)
-{
- int slot;
-
- if (tlb_type == spitfire) {
- printk ("Contents of itlb: ");
- for (slot = 0; slot < 14; slot++) printk (" ");
- printk ("%2x:%016lx,%016lx\n",
- 0,
- spitfire_get_itlb_tag(0), spitfire_get_itlb_data(0));
- for (slot = 1; slot < 64; slot+=3) {
- printk ("%2x:%016lx,%016lx %2x:%016lx,%016lx %2x:%016lx,%016lx\n",
- slot,
- spitfire_get_itlb_tag(slot), spitfire_get_itlb_data(slot),
- slot+1,
- spitfire_get_itlb_tag(slot+1), spitfire_get_itlb_data(slot+1),
- slot+2,
- spitfire_get_itlb_tag(slot+2), spitfire_get_itlb_data(slot+2));
- }
- } else if (tlb_type == cheetah || tlb_type == cheetah_plus) {
- printk ("Contents of itlb0:\n");
- for (slot = 0; slot < 16; slot+=2) {
- printk ("%2x:%016lx,%016lx %2x:%016lx,%016lx\n",
- slot,
- cheetah_get_litlb_tag(slot), cheetah_get_litlb_data(slot),
- slot+1,
- cheetah_get_litlb_tag(slot+1), cheetah_get_litlb_data(slot+1));
- }
- printk ("Contents of itlb2:\n");
- for (slot = 0; slot < 128; slot+=2) {
- printk ("%2x:%016lx,%016lx %2x:%016lx,%016lx\n",
- slot,
- cheetah_get_itlb_tag(slot), cheetah_get_itlb_data(slot),
- slot+1,
- cheetah_get_itlb_tag(slot+1), cheetah_get_itlb_data(slot+1));
- }
- }
-}
-
-void sparc_ultra_dump_dtlb(void)
-{
- int slot;
-
- if (tlb_type == spitfire) {
- printk ("Contents of dtlb: ");
- for (slot = 0; slot < 14; slot++) printk (" ");
- printk ("%2x:%016lx,%016lx\n", 0,
- spitfire_get_dtlb_tag(0), spitfire_get_dtlb_data(0));
- for (slot = 1; slot < 64; slot+=3) {
- printk ("%2x:%016lx,%016lx %2x:%016lx,%016lx %2x:%016lx,%016lx\n",
- slot,
- spitfire_get_dtlb_tag(slot), spitfire_get_dtlb_data(slot),
- slot+1,
- spitfire_get_dtlb_tag(slot+1), spitfire_get_dtlb_data(slot+1),
- slot+2,
- spitfire_get_dtlb_tag(slot+2), spitfire_get_dtlb_data(slot+2));
- }
- } else if (tlb_type == cheetah || tlb_type == cheetah_plus) {
- printk ("Contents of dtlb0:\n");
- for (slot = 0; slot < 16; slot+=2) {
- printk ("%2x:%016lx,%016lx %2x:%016lx,%016lx\n",
- slot,
- cheetah_get_ldtlb_tag(slot), cheetah_get_ldtlb_data(slot),
- slot+1,
- cheetah_get_ldtlb_tag(slot+1), cheetah_get_ldtlb_data(slot+1));
- }
- printk ("Contents of dtlb2:\n");
- for (slot = 0; slot < 512; slot+=2) {
- printk ("%2x:%016lx,%016lx %2x:%016lx,%016lx\n",
- slot,
- cheetah_get_dtlb_tag(slot, 2), cheetah_get_dtlb_data(slot, 2),
- slot+1,
- cheetah_get_dtlb_tag(slot+1, 2), cheetah_get_dtlb_data(slot+1, 2));
- }
- if (tlb_type == cheetah_plus) {
- printk ("Contents of dtlb3:\n");
- for (slot = 0; slot < 512; slot+=2) {
- printk ("%2x:%016lx,%016lx %2x:%016lx,%016lx\n",
- slot,
- cheetah_get_dtlb_tag(slot, 3), cheetah_get_dtlb_data(slot, 3),
- slot+1,
- cheetah_get_dtlb_tag(slot+1, 3), cheetah_get_dtlb_data(slot+1, 3));
- }
- }
- }
-}
-
-extern unsigned long cmdline_memory_size;
-
/* Find a free area for the bootmem map, avoiding the kernel image
* and the initial ramdisk.
*/
@@ -815,8 +770,8 @@ static unsigned long __init choose_bootmap_pfn(unsigned long start_pfn,
unsigned long avoid_start, avoid_end, bootmap_size;
int i;
- bootmap_size = ((end_pfn - start_pfn) + 7) / 8;
- bootmap_size = ALIGN(bootmap_size, sizeof(long));
+ bootmap_size = bootmem_bootmap_pages(end_pfn - start_pfn);
+ bootmap_size <<= PAGE_SHIFT;
avoid_start = avoid_end = 0;
#ifdef CONFIG_BLK_DEV_INITRD
@@ -983,6 +938,20 @@ static void __init trim_pavail(unsigned long *cur_size_p,
}
}
+/* About pages_avail, this is the value we will use to calculate
+ * the zholes_size[] argument given to free_area_init_node(). The
+ * page allocator uses this to calculate nr_kernel_pages,
+ * nr_all_pages and zone->present_pages. On NUMA it is used
+ * to calculate zone->min_unmapped_pages and zone->min_slab_pages.
+ *
+ * So this number should really be set to what the page allocator
+ * actually ends up with. This means:
+ * 1) It should include bootmem map pages, we'll release those.
+ * 2) It should not include the kernel image, except for the
+ * __init sections which we will also release.
+ * 3) It should include the initrd image, since we'll release
+ * that too.
+ */
static unsigned long __init bootmem_init(unsigned long *pages_avail,
unsigned long phys_base)
{
@@ -1069,7 +1038,6 @@ static unsigned long __init bootmem_init(unsigned long *pages_avail,
initrd_start, initrd_end);
#endif
reserve_bootmem(initrd_start, size);
- *pages_avail -= PAGE_ALIGN(size) >> PAGE_SHIFT;
initrd_start += PAGE_OFFSET;
initrd_end += PAGE_OFFSET;
@@ -1082,6 +1050,11 @@ static unsigned long __init bootmem_init(unsigned long *pages_avail,
reserve_bootmem(kern_base, kern_size);
*pages_avail -= PAGE_ALIGN(kern_size) >> PAGE_SHIFT;
+ /* Add back in the initmem pages. */
+ size = ((unsigned long)(__init_end) & PAGE_MASK) -
+ PAGE_ALIGN((unsigned long)__init_begin);
+ *pages_avail += size >> PAGE_SHIFT;
+
/* Reserve the bootmem map. We do not account for it
* in pages_avail because we will release that memory
* in free_all_bootmem.
@@ -1092,7 +1065,6 @@ static unsigned long __init bootmem_init(unsigned long *pages_avail,
(bootmap_pfn << PAGE_SHIFT), size);
#endif
reserve_bootmem((bootmap_pfn << PAGE_SHIFT), size);
- *pages_avail -= PAGE_ALIGN(size) >> PAGE_SHIFT;
for (i = 0; i < pavail_ents; i++) {
unsigned long start_pfn, end_pfn;
@@ -1584,6 +1556,10 @@ void __init mem_init(void)
#ifdef CONFIG_DEBUG_BOOTMEM
prom_printf("mem_init: Calling free_all_bootmem().\n");
#endif
+
+ /* We subtract one to account for the mem_map_zero page
+ * allocated below.
+ */
totalram_pages = num_physpages = free_all_bootmem() - 1;
/*
@@ -1883,62 +1859,6 @@ static unsigned long kern_large_tte(unsigned long paddr)
return val | paddr;
}
-/*
- * Translate PROM's mapping we capture at boot time into physical address.
- * The second parameter is only set from prom_callback() invocations.
- */
-unsigned long prom_virt_to_phys(unsigned long promva, int *error)
-{
- unsigned long mask;
- int i;
-
- mask = _PAGE_PADDR_4U;
- if (tlb_type == hypervisor)
- mask = _PAGE_PADDR_4V;
-
- for (i = 0; i < prom_trans_ents; i++) {
- struct linux_prom_translation *p = &prom_trans[i];
-
- if (promva >= p->virt &&
- promva < (p->virt + p->size)) {
- unsigned long base = p->data & mask;
-
- if (error)
- *error = 0;
- return base + (promva & (8192 - 1));
- }
- }
- if (error)
- *error = 1;
- return 0UL;
-}
-
-/* XXX We should kill off this ugly thing at so me point. XXX */
-unsigned long sun4u_get_pte(unsigned long addr)
-{
- pgd_t *pgdp;
- pud_t *pudp;
- pmd_t *pmdp;
- pte_t *ptep;
- unsigned long mask = _PAGE_PADDR_4U;
-
- if (tlb_type == hypervisor)
- mask = _PAGE_PADDR_4V;
-
- if (addr >= PAGE_OFFSET)
- return addr & mask;
-
- if ((addr >= LOW_OBP_ADDRESS) && (addr < HI_OBP_ADDRESS))
- return prom_virt_to_phys(addr, NULL);
-
- pgdp = pgd_offset_k(addr);
- pudp = pud_offset(pgdp, addr);
- pmdp = pmd_offset(pudp, addr);
- ptep = pte_offset_kernel(pmdp, addr);
-
- return pte_val(*ptep) & mask;
-}
-
/* If not locked, zap it. */
void __flush_tlb_all(void)
{
diff --git a/arch/sparc64/solaris/misc.c b/arch/sparc64/solaris/misc.c
index bca16e8c95c..542c808ec2c 100644
--- a/arch/sparc64/solaris/misc.c
+++ b/arch/sparc64/solaris/misc.c
@@ -224,7 +224,8 @@ static char *serial(char *buffer, int sz)
*buffer = 0;
if (dp) {
- char *val = of_get_property(dp, "system-board-serial#", &len);
+ const char *val =
+ of_get_property(dp, "system-board-serial#", &len);
if (val && len > 0) {
if (len > sz)
@@ -363,8 +364,10 @@ asmlinkage int solaris_sysconf(int id)
{
switch (id) {
case SOLARIS_CONFIG_NGROUPS: return NGROUPS_MAX;
- case SOLARIS_CONFIG_CHILD_MAX: return -1; /* no limit */
- case SOLARIS_CONFIG_OPEN_FILES: return OPEN_MAX;
+ case SOLARIS_CONFIG_CHILD_MAX:
+ return current->signal->rlim[RLIMIT_NPROC].rlim_cur;
+ case SOLARIS_CONFIG_OPEN_FILES:
+ return current->signal->rlim[RLIMIT_NOFILE].rlim_cur;
case SOLARIS_CONFIG_POSIX_VER: return 199309;
case SOLARIS_CONFIG_PAGESIZE: return PAGE_SIZE;
case SOLARIS_CONFIG_XOPEN_VER: return 3;