Diffstat (limited to 'arch/sparc64')
-rw-r--r--  arch/sparc64/Kconfig                |    2
-rw-r--r--  arch/sparc64/defconfig              |   38
-rw-r--r--  arch/sparc64/kernel/Makefile        |    1
-rw-r--r--  arch/sparc64/kernel/auxio.c         |    4
-rw-r--r--  arch/sparc64/kernel/entry.S         |   14
-rw-r--r--  arch/sparc64/kernel/iommu.c         |   39
-rw-r--r--  arch/sparc64/kernel/irq.c           |  610
-rw-r--r--  arch/sparc64/kernel/kprobes.c       |    2
-rw-r--r--  arch/sparc64/kernel/ktlb.S          |   16
-rw-r--r--  arch/sparc64/kernel/of_device.c     |    5
-rw-r--r--  arch/sparc64/kernel/pci.c           |   60
-rw-r--r--  arch/sparc64/kernel/pci_fire.c      |  279
-rw-r--r--  arch/sparc64/kernel/pci_impl.h      |   32
-rw-r--r--  arch/sparc64/kernel/pci_msi.c       |  433
-rw-r--r--  arch/sparc64/kernel/pci_psycho.c    |    6
-rw-r--r--  arch/sparc64/kernel/pci_schizo.c    |    3
-rw-r--r--  arch/sparc64/kernel/pci_sun4v.c     |  437
-rw-r--r--  arch/sparc64/kernel/power.c         |    4
-rw-r--r--  arch/sparc64/kernel/smp.c           |   17
-rw-r--r--  arch/sparc64/kernel/sun4v_ivec.S    |   22
-rw-r--r--  arch/sparc64/kernel/sys_sparc.c     |   15
-rw-r--r--  arch/sparc64/kernel/time.c          |    4
-rw-r--r--  arch/sparc64/kernel/traps.c         |    4
-rw-r--r--  arch/sparc64/kernel/us2e_cpufreq.c  |    1
-rw-r--r--  arch/sparc64/kernel/vmlinux.lds.S   |  211
-rw-r--r--  arch/sparc64/lib/xor.S              |   12
-rw-r--r--  arch/sparc64/mm/fault.c             |    2
-rw-r--r--  arch/sparc64/mm/init.c              |   59
28 files changed, 1464 insertions(+), 868 deletions(-)
diff --git a/arch/sparc64/Kconfig b/arch/sparc64/Kconfig
index 33dabf588bdd..2f22fa90461a 100644
--- a/arch/sparc64/Kconfig
+++ b/arch/sparc64/Kconfig
@@ -240,10 +240,10 @@ config ARCH_SELECT_MEMORY_MODEL
config ARCH_SPARSEMEM_ENABLE
def_bool y
+ select SPARSEMEM_VMEMMAP_ENABLE
config ARCH_SPARSEMEM_DEFAULT
def_bool y
- select SPARSEMEM_STATIC
source "mm/Kconfig"
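
The Kconfig change above switches sparc64 from SPARSEMEM_STATIC to SPARSEMEM_VMEMMAP: struct page entries become virtually contiguous, so pfn_to_page()/page_to_pfn() reduce to pointer arithmetic instead of a per-section lookup. A minimal sketch of that idea (illustrative base address, not the real sparc64 constant):

struct page { unsigned long flags; /* ... */ };

#define VMEMMAP_BASE_SKETCH 0xffe0000000000000UL	/* illustrative only */
#define vmemmap ((struct page *)VMEMMAP_BASE_SKETCH)

static inline struct page *pfn_to_page_sketch(unsigned long pfn)
{
	return vmemmap + pfn;			/* no section table walk */
}

static inline unsigned long page_to_pfn_sketch(const struct page *p)
{
	return (unsigned long)(p - vmemmap);
}
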
diff --git a/arch/sparc64/defconfig b/arch/sparc64/defconfig
index 7d07297db878..1aa2c4048e4b 100644
--- a/arch/sparc64/defconfig
+++ b/arch/sparc64/defconfig
@@ -1,7 +1,7 @@
#
# Automatically generated make config: don't edit
-# Linux kernel version: 2.6.23-rc6
-# Sun Sep 16 09:52:11 2007
+# Linux kernel version: 2.6.23
+# Sat Oct 13 21:53:54 2007
#
CONFIG_SPARC=y
CONFIG_SPARC64=y
@@ -69,7 +69,6 @@ CONFIG_FUTEX=y
CONFIG_ANON_INODES=y
CONFIG_EPOLL=y
CONFIG_SIGNALFD=y
-CONFIG_TIMERFD=y
CONFIG_EVENTFD=y
CONFIG_SHMEM=y
CONFIG_VM_EVENT_COUNTERS=y
@@ -89,6 +88,7 @@ CONFIG_KMOD=y
CONFIG_BLOCK=y
CONFIG_BLK_DEV_IO_TRACE=y
CONFIG_BLK_DEV_BSG=y
+CONFIG_BLOCK_COMPAT=y
#
# IO Schedulers
@@ -111,6 +111,7 @@ CONFIG_GENERIC_HARDIRQS=y
CONFIG_TICK_ONESHOT=y
CONFIG_NO_HZ=y
CONFIG_HIGH_RES_TIMERS=y
+CONFIG_GENERIC_CLOCKEVENTS_BUILD=y
# CONFIG_SMP is not set
CONFIG_CPU_FREQ=y
CONFIG_CPU_FREQ_TABLE=m
@@ -119,6 +120,8 @@ CONFIG_CPU_FREQ_STAT=m
CONFIG_CPU_FREQ_STAT_DETAILS=y
CONFIG_CPU_FREQ_DEFAULT_GOV_PERFORMANCE=y
# CONFIG_CPU_FREQ_DEFAULT_GOV_USERSPACE is not set
+# CONFIG_CPU_FREQ_DEFAULT_GOV_ONDEMAND is not set
+# CONFIG_CPU_FREQ_DEFAULT_GOV_CONSERVATIVE is not set
CONFIG_CPU_FREQ_GOV_PERFORMANCE=y
CONFIG_CPU_FREQ_GOV_POWERSAVE=m
CONFIG_CPU_FREQ_GOV_USERSPACE=m
@@ -213,6 +216,7 @@ CONFIG_INET_TUNNEL=y
CONFIG_INET_XFRM_MODE_TRANSPORT=y
CONFIG_INET_XFRM_MODE_TUNNEL=y
CONFIG_INET_XFRM_MODE_BEET=y
+CONFIG_INET_LRO=y
CONFIG_INET_DIAG=y
CONFIG_INET_TCP_DIAG=y
# CONFIG_TCP_CONG_ADVANCED is not set
@@ -304,6 +308,7 @@ CONFIG_NET_TCPPROBE=m
#
# Generic Driver Options
#
+CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug"
CONFIG_STANDALONE=y
# CONFIG_PREVENT_FIRMWARE_BUILD is not set
CONFIG_FW_LOADER=y
@@ -355,6 +360,11 @@ CONFIG_IDE_PROC_FS=y
# IDE chipset support/bugfixes
#
CONFIG_IDE_GENERIC=y
+# CONFIG_BLK_DEV_PLATFORM is not set
+
+#
+# PCI IDE chipsets support
+#
CONFIG_BLK_DEV_IDEPCI=y
# CONFIG_IDEPCI_SHARE_IRQ is not set
CONFIG_IDEPCI_PCIBUS_ORDER=y
@@ -391,7 +401,6 @@ CONFIG_BLK_DEV_ALI15X3=y
# CONFIG_BLK_DEV_TC86C001 is not set
# CONFIG_IDE_ARM is not set
CONFIG_BLK_DEV_IDEDMA=y
-# CONFIG_IDEDMA_IVB is not set
# CONFIG_BLK_DEV_HD is not set
#
@@ -505,6 +514,8 @@ CONFIG_DUMMY=m
# CONFIG_MACVLAN is not set
# CONFIG_EQUALIZER is not set
# CONFIG_TUN is not set
+# CONFIG_VETH is not set
+# CONFIG_IP1000 is not set
# CONFIG_ARCNET is not set
# CONFIG_PHYLIB is not set
CONFIG_NET_ETHERNET=y
@@ -518,13 +529,16 @@ CONFIG_CASSINI=m
# CONFIG_NET_VENDOR_3COM is not set
# CONFIG_NET_TULIP is not set
# CONFIG_HP100 is not set
+# CONFIG_IBM_NEW_EMAC_ZMII is not set
+# CONFIG_IBM_NEW_EMAC_RGMII is not set
+# CONFIG_IBM_NEW_EMAC_TAH is not set
+# CONFIG_IBM_NEW_EMAC_EMAC4 is not set
CONFIG_NET_PCI=y
# CONFIG_PCNET32 is not set
# CONFIG_AMD8111_ETH is not set
# CONFIG_ADAPTEC_STARFIRE is not set
# CONFIG_B44 is not set
# CONFIG_FORCEDETH is not set
-# CONFIG_DGRS is not set
# CONFIG_EEPRO100 is not set
# CONFIG_E100 is not set
# CONFIG_FEALNX is not set
@@ -543,6 +557,7 @@ CONFIG_NETDEV_1000=y
CONFIG_E1000=m
CONFIG_E1000_NAPI=y
# CONFIG_E1000_DISABLE_PACKET_SPLIT is not set
+# CONFIG_E1000E is not set
# CONFIG_MYRI_SBUS is not set
# CONFIG_NS83820 is not set
# CONFIG_HAMACHI is not set
@@ -560,11 +575,14 @@ CONFIG_BNX2=m
CONFIG_NETDEV_10000=y
# CONFIG_CHELSIO_T1 is not set
# CONFIG_CHELSIO_T3 is not set
+# CONFIG_IXGBE is not set
# CONFIG_IXGB is not set
# CONFIG_S2IO is not set
# CONFIG_MYRI10GE is not set
# CONFIG_NETXEN_NIC is not set
+# CONFIG_NIU is not set
# CONFIG_MLX4_CORE is not set
+# CONFIG_TEHUTI is not set
# CONFIG_TR is not set
#
@@ -820,6 +838,12 @@ CONFIG_HWMON=y
# CONFIG_HWMON_DEBUG_CHIP is not set
#
+# Sonics Silicon Backplane
+#
+CONFIG_SSB_POSSIBLE=y
+# CONFIG_SSB is not set
+
+#
# Multifunction device drivers
#
# CONFIG_MFD_SM501 is not set
@@ -1399,6 +1423,7 @@ CONFIG_ASYNC_MEMCPY=m
CONFIG_ASYNC_XOR=m
CONFIG_CRYPTO=y
CONFIG_CRYPTO_ALGAPI=y
+CONFIG_CRYPTO_AEAD=m
CONFIG_CRYPTO_BLKCIPHER=y
CONFIG_CRYPTO_HASH=y
CONFIG_CRYPTO_MANAGER=y
@@ -1417,6 +1442,7 @@ CONFIG_CRYPTO_ECB=m
CONFIG_CRYPTO_CBC=y
CONFIG_CRYPTO_PCBC=m
CONFIG_CRYPTO_LRW=m
+CONFIG_CRYPTO_XTS=m
# CONFIG_CRYPTO_CRYPTD is not set
CONFIG_CRYPTO_DES=y
CONFIG_CRYPTO_FCRYPT=m
@@ -1431,11 +1457,13 @@ CONFIG_CRYPTO_TEA=m
CONFIG_CRYPTO_ARC4=m
CONFIG_CRYPTO_KHAZAD=m
CONFIG_CRYPTO_ANUBIS=m
+CONFIG_CRYPTO_SEED=m
CONFIG_CRYPTO_DEFLATE=y
CONFIG_CRYPTO_MICHAEL_MIC=m
CONFIG_CRYPTO_CRC32C=m
CONFIG_CRYPTO_CAMELLIA=m
CONFIG_CRYPTO_TEST=m
+CONFIG_CRYPTO_AUTHENC=m
CONFIG_CRYPTO_HW=y
#
diff --git a/arch/sparc64/kernel/Makefile b/arch/sparc64/kernel/Makefile
index 40d2f3aae91e..112c46e66578 100644
--- a/arch/sparc64/kernel/Makefile
+++ b/arch/sparc64/kernel/Makefile
@@ -18,6 +18,7 @@ obj-$(CONFIG_STACKTRACE) += stacktrace.o
obj-$(CONFIG_PCI) += ebus.o isa.o pci_common.o \
pci_psycho.o pci_sabre.o pci_schizo.o \
pci_sun4v.o pci_sun4v_asm.o pci_fire.o
+obj-$(CONFIG_PCI_MSI) += pci_msi.o
obj-$(CONFIG_SMP) += smp.o trampoline.o hvtramp.o
obj-$(CONFIG_SPARC32_COMPAT) += sys32.o sys_sparc32.o signal32.o
obj-$(CONFIG_BINFMT_ELF32) += binfmt_elf32.o
diff --git a/arch/sparc64/kernel/auxio.c b/arch/sparc64/kernel/auxio.c
index 7b379761e9f8..c55f0293eacd 100644
--- a/arch/sparc64/kernel/auxio.c
+++ b/arch/sparc64/kernel/auxio.c
@@ -148,9 +148,11 @@ static int __devinit auxio_probe(struct of_device *dev, const struct of_device_i
}
static struct of_platform_driver auxio_driver = {
- .name = "auxio",
.match_table = auxio_match,
.probe = auxio_probe,
+ .driver = {
+ .name = "auxio",
+ },
};
static int __init auxio_init(void)
diff --git a/arch/sparc64/kernel/entry.S b/arch/sparc64/kernel/entry.S
index 8059531bf0ac..c9b0d7af64ae 100644
--- a/arch/sparc64/kernel/entry.S
+++ b/arch/sparc64/kernel/entry.S
@@ -429,16 +429,16 @@ do_ivec:
stxa %g0, [%g0] ASI_INTR_RECEIVE
membar #Sync
- sethi %hi(ivector_table), %g2
- sllx %g3, 3, %g3
- or %g2, %lo(ivector_table), %g2
+ sethi %hi(ivector_table_pa), %g2
+ ldx [%g2 + %lo(ivector_table_pa)], %g2
+ sllx %g3, 4, %g3
add %g2, %g3, %g3
- TRAP_LOAD_IRQ_WORK(%g6, %g1)
+ TRAP_LOAD_IRQ_WORK_PA(%g6, %g1)
- lduw [%g6], %g5 /* g5 = irq_work(cpu) */
- stw %g5, [%g3 + 0x00] /* bucket->irq_chain = g5 */
- stw %g3, [%g6] /* irq_work(cpu) = bucket */
+ ldx [%g6], %g5
+ stxa %g5, [%g3] ASI_PHYS_USE_EC
+ stx %g3, [%g6]
wr %g0, 1 << PIL_DEVICE_IRQ, %set_softint
retry
do_ivec_xcall:
diff --git a/arch/sparc64/kernel/iommu.c b/arch/sparc64/kernel/iommu.c
index b35a62167e9c..db3ffcf7a120 100644
--- a/arch/sparc64/kernel/iommu.c
+++ b/arch/sparc64/kernel/iommu.c
@@ -10,6 +10,7 @@
#include <linux/device.h>
#include <linux/dma-mapping.h>
#include <linux/errno.h>
+#include <linux/scatterlist.h>
#ifdef CONFIG_PCI
#include <linux/pci.h>
@@ -480,7 +481,7 @@ static inline void fill_sg(iopte_t *iopte, struct scatterlist *sg,
unsigned long iopte_protection)
{
struct scatterlist *dma_sg = sg;
- struct scatterlist *sg_end = sg + nelems;
+ struct scatterlist *sg_end = sg_last(sg, nelems);
int i;
for (i = 0; i < nused; i++) {
@@ -515,7 +516,7 @@ static inline void fill_sg(iopte_t *iopte, struct scatterlist *sg,
len -= (IO_PAGE_SIZE - (tmp & (IO_PAGE_SIZE - 1UL)));
break;
}
- sg++;
+ sg = sg_next(sg);
}
pteval = iopte_protection | (pteval & IOPTE_PAGE);
@@ -528,24 +529,24 @@ static inline void fill_sg(iopte_t *iopte, struct scatterlist *sg,
}
pteval = (pteval & IOPTE_PAGE) + len;
- sg++;
+ sg = sg_next(sg);
/* Skip over any tail mappings we've fully mapped,
* adjusting pteval along the way. Stop when we
* detect a page crossing event.
*/
- while (sg < sg_end &&
+ while (sg != sg_end &&
(pteval << (64 - IO_PAGE_SHIFT)) != 0UL &&
(pteval == SG_ENT_PHYS_ADDRESS(sg)) &&
((pteval ^
(SG_ENT_PHYS_ADDRESS(sg) + sg->length - 1UL)) >> IO_PAGE_SHIFT) == 0UL) {
pteval += sg->length;
- sg++;
+ sg = sg_next(sg);
}
if ((pteval << (64 - IO_PAGE_SHIFT)) == 0UL)
pteval = ~0UL;
} while (dma_npages != 0);
- dma_sg++;
+ dma_sg = sg_next(dma_sg);
}
}
@@ -606,7 +607,7 @@ static int dma_4u_map_sg(struct device *dev, struct scatterlist *sglist,
sgtmp = sglist;
while (used && sgtmp->dma_length) {
sgtmp->dma_address += dma_base;
- sgtmp++;
+ sgtmp = sg_next(sgtmp);
used--;
}
used = nelems - used;
@@ -642,6 +643,7 @@ static void dma_4u_unmap_sg(struct device *dev, struct scatterlist *sglist,
struct strbuf *strbuf;
iopte_t *base;
unsigned long flags, ctx, i, npages;
+ struct scatterlist *sg, *sgprv;
u32 bus_addr;
if (unlikely(direction == DMA_NONE)) {
@@ -654,11 +656,14 @@ static void dma_4u_unmap_sg(struct device *dev, struct scatterlist *sglist,
bus_addr = sglist->dma_address & IO_PAGE_MASK;
- for (i = 1; i < nelems; i++)
- if (sglist[i].dma_length == 0)
+ sgprv = NULL;
+ for_each_sg(sglist, sg, nelems, i) {
+ if (sg->dma_length == 0)
break;
- i--;
- npages = (IO_PAGE_ALIGN(sglist[i].dma_address + sglist[i].dma_length) -
+ sgprv = sg;
+ }
+
+ npages = (IO_PAGE_ALIGN(sgprv->dma_address + sgprv->dma_length) -
bus_addr) >> IO_PAGE_SHIFT;
base = iommu->page_table +
@@ -730,6 +735,7 @@ static void dma_4u_sync_sg_for_cpu(struct device *dev,
struct iommu *iommu;
struct strbuf *strbuf;
unsigned long flags, ctx, npages, i;
+ struct scatterlist *sg, *sgprv;
u32 bus_addr;
iommu = dev->archdata.iommu;
@@ -753,11 +759,14 @@ static void dma_4u_sync_sg_for_cpu(struct device *dev,
/* Step 2: Kick data out of streaming buffers. */
bus_addr = sglist[0].dma_address & IO_PAGE_MASK;
- for(i = 1; i < nelems; i++)
- if (!sglist[i].dma_length)
+ sgprv = NULL;
+ for_each_sg(sglist, sg, nelems, i) {
+ if (sg->dma_length == 0)
break;
- i--;
- npages = (IO_PAGE_ALIGN(sglist[i].dma_address + sglist[i].dma_length)
+ sgprv = sg;
+ }
+
+ npages = (IO_PAGE_ALIGN(sgprv->dma_address + sgprv->dma_length)
- bus_addr) >> IO_PAGE_SHIFT;
strbuf_flush(strbuf, iommu, bus_addr, ctx, npages, direction);
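
The iommu.c conversion from sg++ pointer arithmetic to sg_next()/for_each_sg() is what makes chained scatterlists safe: entries may live in separately allocated blocks linked by a chain pointer, so advancing with ++ can walk off the end of a block. A stand-alone model of the difference (hypothetical simplified types, not the kernel's struct scatterlist, which keeps its chain pointer in a dedicated marker entry):

#include <stdio.h>

struct sg_entry {
	unsigned int dma_length;	/* 0 terminates the mapped list */
	struct sg_entry *chain;		/* non-NULL: continue in next block */
};

static struct sg_entry *sg_next_model(struct sg_entry *sg)
{
	return sg->chain ? sg->chain : sg + 1;
}

int main(void)
{
	struct sg_entry block2[2] = { { 300, NULL }, { 0, NULL } };
	struct sg_entry block1[2] = { { 100, NULL }, { 200, block2 } };
	struct sg_entry *sg = block1;

	/* Walk until a zero dma_length, as dma_4u_unmap_sg() does. */
	while (sg->dma_length != 0) {
		printf("segment of %u bytes\n", sg->dma_length);
		sg = sg_next_model(sg);	/* plain sg++ would leave block1 */
	}
	return 0;
}
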
diff --git a/arch/sparc64/kernel/irq.c b/arch/sparc64/kernel/irq.c
index 23956096b3bf..f3922e5a89f6 100644
--- a/arch/sparc64/kernel/irq.c
+++ b/arch/sparc64/kernel/irq.c
@@ -21,7 +21,6 @@
#include <linux/seq_file.h>
#include <linux/bootmem.h>
#include <linux/irq.h>
-#include <linux/msi.h>
#include <asm/ptrace.h>
#include <asm/processor.h>
@@ -43,6 +42,7 @@
#include <asm/auxio.h>
#include <asm/head.h>
#include <asm/hypervisor.h>
+#include <asm/cacheflush.h>
/* UPA nodes send interrupt packet to UltraSparc with first data reg
* value low 5 (7 on Starfire) bits holding the IRQ identifier being
@@ -52,86 +52,128 @@
* To make processing these packets efficient and race free we use
* an array of irq buckets below. The interrupt vector handler in
* entry.S feeds incoming packets into per-cpu pil-indexed lists.
- * The IVEC handler does not need to act atomically, the PIL dispatch
- * code uses CAS to get an atomic snapshot of the list and clear it
- * at the same time.
*
* If you make changes to ino_bucket, please update hand coded assembler
* of the vectored interrupt trap handler(s) in entry.S and sun4v_ivec.S
*/
struct ino_bucket {
- /* Next handler in per-CPU IRQ worklist. We know that
- * bucket pointers have the high 32-bits clear, so to
- * save space we only store the bits we need.
- */
-/*0x00*/unsigned int irq_chain;
+/*0x00*/unsigned long __irq_chain_pa;
/* Virtual interrupt number assigned to this INO. */
-/*0x04*/unsigned int virt_irq;
+/*0x08*/unsigned int __virt_irq;
+/*0x0c*/unsigned int __pad;
};
#define NUM_IVECS (IMAP_INR + 1)
-struct ino_bucket ivector_table[NUM_IVECS] __attribute__ ((aligned (SMP_CACHE_BYTES)));
-
-#define __irq_ino(irq) \
- (((struct ino_bucket *)(unsigned long)(irq)) - &ivector_table[0])
-#define __bucket(irq) ((struct ino_bucket *)(unsigned long)(irq))
-#define __irq(bucket) ((unsigned int)(unsigned long)(bucket))
-
-/* This has to be in the main kernel image, it cannot be
- * turned into per-cpu data. The reason is that the main
- * kernel image is locked into the TLB and this structure
- * is accessed from the vectored interrupt trap handler. If
- * access to this structure takes a TLB miss it could cause
- * the 5-level sparc v9 trap stack to overflow.
+struct ino_bucket *ivector_table;
+unsigned long ivector_table_pa;
+
+/* On several sun4u processors, it is illegal to mix bypass and
+ * non-bypass accesses. Therefore we access all INO buckets
+ * using bypass accesses only.
*/
-#define irq_work(__cpu) &(trap_block[(__cpu)].irq_worklist)
+static unsigned long bucket_get_chain_pa(unsigned long bucket_pa)
+{
+ unsigned long ret;
+
+ __asm__ __volatile__("ldxa [%1] %2, %0"
+ : "=&r" (ret)
+ : "r" (bucket_pa +
+ offsetof(struct ino_bucket,
+ __irq_chain_pa)),
+ "i" (ASI_PHYS_USE_EC));
+
+ return ret;
+}
+
+static void bucket_clear_chain_pa(unsigned long bucket_pa)
+{
+ __asm__ __volatile__("stxa %%g0, [%0] %1"
+ : /* no outputs */
+ : "r" (bucket_pa +
+ offsetof(struct ino_bucket,
+ __irq_chain_pa)),
+ "i" (ASI_PHYS_USE_EC));
+}
+
+static unsigned int bucket_get_virt_irq(unsigned long bucket_pa)
+{
+ unsigned int ret;
+
+ __asm__ __volatile__("lduwa [%1] %2, %0"
+ : "=&r" (ret)
+ : "r" (bucket_pa +
+ offsetof(struct ino_bucket,
+ __virt_irq)),
+ "i" (ASI_PHYS_USE_EC));
+
+ return ret;
+}
+
+static void bucket_set_virt_irq(unsigned long bucket_pa,
+ unsigned int virt_irq)
+{
+ __asm__ __volatile__("stwa %0, [%1] %2"
+ : /* no outputs */
+ : "r" (virt_irq),
+ "r" (bucket_pa +
+ offsetof(struct ino_bucket,
+ __virt_irq)),
+ "i" (ASI_PHYS_USE_EC));
+}
+
+#define irq_work_pa(__cpu) &(trap_block[(__cpu)].irq_worklist_pa)
static struct {
- unsigned int irq;
unsigned int dev_handle;
unsigned int dev_ino;
-} virt_to_real_irq_table[NR_IRQS];
+ unsigned int in_use;
+} virt_irq_table[NR_IRQS];
+static DEFINE_SPINLOCK(virt_irq_alloc_lock);
-static unsigned char virt_irq_alloc(unsigned int real_irq)
+unsigned char virt_irq_alloc(unsigned int dev_handle,
+ unsigned int dev_ino)
{
+ unsigned long flags;
unsigned char ent;
BUILD_BUG_ON(NR_IRQS >= 256);
+ spin_lock_irqsave(&virt_irq_alloc_lock, flags);
+
for (ent = 1; ent < NR_IRQS; ent++) {
- if (!virt_to_real_irq_table[ent].irq)
+ if (!virt_irq_table[ent].in_use)
break;
}
if (ent >= NR_IRQS) {
printk(KERN_ERR "IRQ: Out of virtual IRQs.\n");
- return 0;
+ ent = 0;
+ } else {
+ virt_irq_table[ent].dev_handle = dev_handle;
+ virt_irq_table[ent].dev_ino = dev_ino;
+ virt_irq_table[ent].in_use = 1;
}
- virt_to_real_irq_table[ent].irq = real_irq;
+ spin_unlock_irqrestore(&virt_irq_alloc_lock, flags);
return ent;
}
#ifdef CONFIG_PCI_MSI
-static void virt_irq_free(unsigned int virt_irq)
+void virt_irq_free(unsigned int virt_irq)
{
- unsigned int real_irq;
+ unsigned long flags;
if (virt_irq >= NR_IRQS)
return;
- real_irq = virt_to_real_irq_table[virt_irq].irq;
- virt_to_real_irq_table[virt_irq].irq = 0;
+ spin_lock_irqsave(&virt_irq_alloc_lock, flags);
- __bucket(real_irq)->virt_irq = 0;
-}
-#endif
+ virt_irq_table[virt_irq].in_use = 0;
-static unsigned int virt_to_real_irq(unsigned char virt_irq)
-{
- return virt_to_real_irq_table[virt_irq].irq;
+ spin_unlock_irqrestore(&virt_irq_alloc_lock, flags);
}
+#endif
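
virt_irq_alloc() above is a linear scan over a fixed table under a spinlock, with slot 0 reserved as the "no irq" sentinel (hence the unsigned char return and the BUILD_BUG_ON against NR_IRQS >= 256). A userspace sketch of the same pattern, assuming a pthread mutex in place of the kernel spinlock:

#include <pthread.h>

#define NR_SLOTS 256			/* mirrors the 8-bit virt_irq space */

static struct { unsigned int in_use; } slot_table[NR_SLOTS];
static pthread_mutex_t slot_lock = PTHREAD_MUTEX_INITIALIZER;

static unsigned char slot_alloc(void)
{
	unsigned char ret = 0;		/* 0 == allocation failed */
	unsigned int ent;

	pthread_mutex_lock(&slot_lock);
	for (ent = 1; ent < NR_SLOTS; ent++) {
		if (!slot_table[ent].in_use) {
			slot_table[ent].in_use = 1;
			ret = ent;
			break;
		}
	}
	pthread_mutex_unlock(&slot_lock);
	return ret;
}
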
/*
* /proc/interrupts printing:
@@ -217,38 +259,8 @@ struct irq_handler_data {
void (*pre_handler)(unsigned int, void *, void *);
void *pre_handler_arg1;
void *pre_handler_arg2;
-
- u32 msi;
};
-void sparc64_set_msi(unsigned int virt_irq, u32 msi)
-{
- struct irq_handler_data *data = get_irq_chip_data(virt_irq);
-
- if (data)
- data->msi = msi;
-}
-
-u32 sparc64_get_msi(unsigned int virt_irq)
-{
- struct irq_handler_data *data = get_irq_chip_data(virt_irq);
-
- if (data)
- return data->msi;
- return 0xffffffff;
-}
-
-static inline struct ino_bucket *virt_irq_to_bucket(unsigned int virt_irq)
-{
- unsigned int real_irq = virt_to_real_irq(virt_irq);
- struct ino_bucket *bucket = NULL;
-
- if (likely(real_irq))
- bucket = __bucket(real_irq);
-
- return bucket;
-}
-
#ifdef CONFIG_SMP
static int irq_choose_cpu(unsigned int virt_irq)
{
@@ -348,201 +360,152 @@ static void sun4u_irq_end(unsigned int virt_irq)
static void sun4v_irq_enable(unsigned int virt_irq)
{
- struct ino_bucket *bucket = virt_irq_to_bucket(virt_irq);
- unsigned int ino = bucket - &ivector_table[0];
-
- if (likely(bucket)) {
- unsigned long cpuid;
- int err;
+ unsigned int ino = virt_irq_table[virt_irq].dev_ino;
+ unsigned long cpuid = irq_choose_cpu(virt_irq);
+ int err;
- cpuid = irq_choose_cpu(virt_irq);
-
- err = sun4v_intr_settarget(ino, cpuid);
- if (err != HV_EOK)
- printk(KERN_ERR "sun4v_intr_settarget(%x,%lu): "
- "err(%d)\n", ino, cpuid, err);
- err = sun4v_intr_setstate(ino, HV_INTR_STATE_IDLE);
- if (err != HV_EOK)
- printk(KERN_ERR "sun4v_intr_setstate(%x): "
- "err(%d)\n", ino, err);
- err = sun4v_intr_setenabled(ino, HV_INTR_ENABLED);
- if (err != HV_EOK)
- printk(KERN_ERR "sun4v_intr_setenabled(%x): err(%d)\n",
- ino, err);
- }
+ err = sun4v_intr_settarget(ino, cpuid);
+ if (err != HV_EOK)
+ printk(KERN_ERR "sun4v_intr_settarget(%x,%lu): "
+ "err(%d)\n", ino, cpuid, err);
+ err = sun4v_intr_setstate(ino, HV_INTR_STATE_IDLE);
+ if (err != HV_EOK)
+ printk(KERN_ERR "sun4v_intr_setstate(%x): "
+ "err(%d)\n", ino, err);
+ err = sun4v_intr_setenabled(ino, HV_INTR_ENABLED);
+ if (err != HV_EOK)
+ printk(KERN_ERR "sun4v_intr_setenabled(%x): err(%d)\n",
+ ino, err);
}
static void sun4v_set_affinity(unsigned int virt_irq, cpumask_t mask)
{
- struct ino_bucket *bucket = virt_irq_to_bucket(virt_irq);
- unsigned int ino = bucket - &ivector_table[0];
+ unsigned int ino = virt_irq_table[virt_irq].dev_ino;
+ unsigned long cpuid = irq_choose_cpu(virt_irq);
+ int err;
- if (likely(bucket)) {
- unsigned long cpuid;
- int err;
-
- cpuid = irq_choose_cpu(virt_irq);
-
- err = sun4v_intr_settarget(ino, cpuid);
- if (err != HV_EOK)
- printk(KERN_ERR "sun4v_intr_settarget(%x,%lu): "
- "err(%d)\n", ino, cpuid, err);
- }
+ err = sun4v_intr_settarget(ino, cpuid);
+ if (err != HV_EOK)
+ printk(KERN_ERR "sun4v_intr_settarget(%x,%lu): "
+ "err(%d)\n", ino, cpuid, err);
}
static void sun4v_irq_disable(unsigned int virt_irq)
{
- struct ino_bucket *bucket = virt_irq_to_bucket(virt_irq);
- unsigned int ino = bucket - &ivector_table[0];
-
- if (likely(bucket)) {
- int err;
-
- err = sun4v_intr_setenabled(ino, HV_INTR_DISABLED);
- if (err != HV_EOK)
- printk(KERN_ERR "sun4v_intr_setenabled(%x): "
- "err(%d)\n", ino, err);
- }
-}
-
-#ifdef CONFIG_PCI_MSI
-static void sun4v_msi_enable(unsigned int virt_irq)
-{
- sun4v_irq_enable(virt_irq);
- unmask_msi_irq(virt_irq);
-}
+ unsigned int ino = virt_irq_table[virt_irq].dev_ino;
+ int err;
-static void sun4v_msi_disable(unsigned int virt_irq)
-{
- mask_msi_irq(virt_irq);
- sun4v_irq_disable(virt_irq);
+ err = sun4v_intr_setenabled(ino, HV_INTR_DISABLED);
+ if (err != HV_EOK)
+ printk(KERN_ERR "sun4v_intr_setenabled(%x): "
+ "err(%d)\n", ino, err);
}
-#endif
static void sun4v_irq_end(unsigned int virt_irq)
{
- struct ino_bucket *bucket = virt_irq_to_bucket(virt_irq);
- unsigned int ino = bucket - &ivector_table[0];
+ unsigned int ino = virt_irq_table[virt_irq].dev_ino;
struct irq_desc *desc = irq_desc + virt_irq;
+ int err;
if (unlikely(desc->status & (IRQ_DISABLED|IRQ_INPROGRESS)))
return;
- if (likely(bucket)) {
- int err;
-
- err = sun4v_intr_setstate(ino, HV_INTR_STATE_IDLE);
- if (err != HV_EOK)
- printk(KERN_ERR "sun4v_intr_setstate(%x): "
- "err(%d)\n", ino, err);
- }
+ err = sun4v_intr_setstate(ino, HV_INTR_STATE_IDLE);
+ if (err != HV_EOK)
+ printk(KERN_ERR "sun4v_intr_setstate(%x): "
+ "err(%d)\n", ino, err);
}
static void sun4v_virq_enable(unsigned int virt_irq)
{
- struct ino_bucket *bucket = virt_irq_to_bucket(virt_irq);
-
- if (likely(bucket)) {
- unsigned long cpuid, dev_handle, dev_ino;
- int err;
-
- cpuid = irq_choose_cpu(virt_irq);
-
- dev_handle = virt_to_real_irq_table[virt_irq].dev_handle;
- dev_ino = virt_to_real_irq_table[virt_irq].dev_ino;
-
- err = sun4v_vintr_set_target(dev_handle, dev_ino, cpuid);
- if (err != HV_EOK)
- printk(KERN_ERR "sun4v_vintr_set_target(%lx,%lx,%lu): "
- "err(%d)\n",
- dev_handle, dev_ino, cpuid, err);
- err = sun4v_vintr_set_state(dev_handle, dev_ino,
- HV_INTR_STATE_IDLE);
- if (err != HV_EOK)
- printk(KERN_ERR "sun4v_vintr_set_state(%lx,%lx,"
- "HV_INTR_STATE_IDLE): err(%d)\n",
- dev_handle, dev_ino, err);
- err = sun4v_vintr_set_valid(dev_handle, dev_ino,
- HV_INTR_ENABLED);
- if (err != HV_EOK)
- printk(KERN_ERR "sun4v_vintr_set_state(%lx,%lx,"
- "HV_INTR_ENABLED): err(%d)\n",
- dev_handle, dev_ino, err);
- }
+ unsigned long cpuid, dev_handle, dev_ino;
+ int err;
+
+ cpuid = irq_choose_cpu(virt_irq);
+
+ dev_handle = virt_irq_table[virt_irq].dev_handle;
+ dev_ino = virt_irq_table[virt_irq].dev_ino;
+
+ err = sun4v_vintr_set_target(dev_handle, dev_ino, cpuid);
+ if (err != HV_EOK)
+ printk(KERN_ERR "sun4v_vintr_set_target(%lx,%lx,%lu): "
+ "err(%d)\n",
+ dev_handle, dev_ino, cpuid, err);
+ err = sun4v_vintr_set_state(dev_handle, dev_ino,
+ HV_INTR_STATE_IDLE);
+ if (err != HV_EOK)
+ printk(KERN_ERR "sun4v_vintr_set_state(%lx,%lx,"
+ "HV_INTR_STATE_IDLE): err(%d)\n",
+ dev_handle, dev_ino, err);
+ err = sun4v_vintr_set_valid(dev_handle, dev_ino,
+ HV_INTR_ENABLED);
+ if (err != HV_EOK)
+ printk(KERN_ERR "sun4v_vintr_set_state(%lx,%lx,"
+ "HV_INTR_ENABLED): err(%d)\n",
+ dev_handle, dev_ino, err);
}
static void sun4v_virt_set_affinity(unsigned int virt_irq, cpumask_t mask)
{
- struct ino_bucket *bucket = virt_irq_to_bucket(virt_irq);
+ unsigned long cpuid, dev_handle, dev_ino;
+ int err;
- if (likely(bucket)) {
- unsigned long cpuid, dev_handle, dev_ino;
- int err;
+ cpuid = irq_choose_cpu(virt_irq);
- cpuid = irq_choose_cpu(virt_irq);
+ dev_handle = virt_irq_table[virt_irq].dev_handle;
+ dev_ino = virt_irq_table[virt_irq].dev_ino;
- dev_handle = virt_to_real_irq_table[virt_irq].dev_handle;
- dev_ino = virt_to_real_irq_table[virt_irq].dev_ino;
-
- err = sun4v_vintr_set_target(dev_handle, dev_ino, cpuid);
- if (err != HV_EOK)
- printk(KERN_ERR "sun4v_vintr_set_target(%lx,%lx,%lu): "
- "err(%d)\n",
- dev_handle, dev_ino, cpuid, err);
- }
+ err = sun4v_vintr_set_target(dev_handle, dev_ino, cpuid);
+ if (err != HV_EOK)
+ printk(KERN_ERR "sun4v_vintr_set_target(%lx,%lx,%lu): "
+ "err(%d)\n",
+ dev_handle, dev_ino, cpuid, err);
}
static void sun4v_virq_disable(unsigned int virt_irq)
{
- struct ino_bucket *bucket = virt_irq_to_bucket(virt_irq);
+ unsigned long dev_handle, dev_ino;
+ int err;
- if (likely(bucket)) {
- unsigned long dev_handle, dev_ino;
- int err;
+ dev_handle = virt_irq_table[virt_irq].dev_handle;
+ dev_ino = virt_irq_table[virt_irq].dev_ino;
- dev_handle = virt_to_real_irq_table[virt_irq].dev_handle;
- dev_ino = virt_to_real_irq_table[virt_irq].dev_ino;
-
- err = sun4v_vintr_set_valid(dev_handle, dev_ino,
- HV_INTR_DISABLED);
- if (err != HV_EOK)
- printk(KERN_ERR "sun4v_vintr_set_state(%lx,%lx,"
- "HV_INTR_DISABLED): err(%d)\n",
- dev_handle, dev_ino, err);
- }
+ err = sun4v_vintr_set_valid(dev_handle, dev_ino,
+ HV_INTR_DISABLED);
+ if (err != HV_EOK)
+ printk(KERN_ERR "sun4v_vintr_set_state(%lx,%lx,"
+ "HV_INTR_DISABLED): err(%d)\n",
+ dev_handle, dev_ino, err);
}
static void sun4v_virq_end(unsigned int virt_irq)
{
- struct ino_bucket *bucket = virt_irq_to_bucket(virt_irq);
struct irq_desc *desc = irq_desc + virt_irq;
+ unsigned long dev_handle, dev_ino;
+ int err;
if (unlikely(desc->status & (IRQ_DISABLED|IRQ_INPROGRESS)))
return;
- if (likely(bucket)) {
- unsigned long dev_handle, dev_ino;
- int err;
+ dev_handle = virt_irq_table[virt_irq].dev_handle;
+ dev_ino = virt_irq_table[virt_irq].dev_ino;
- dev_handle = virt_to_real_irq_table[virt_irq].dev_handle;
- dev_ino = virt_to_real_irq_table[virt_irq].dev_ino;
-
- err = sun4v_vintr_set_state(dev_handle, dev_ino,
- HV_INTR_STATE_IDLE);
- if (err != HV_EOK)
- printk(KERN_ERR "sun4v_vintr_set_state(%lx,%lx,"
- "HV_INTR_STATE_IDLE): err(%d)\n",
- dev_handle, dev_ino, err);
- }
+ err = sun4v_vintr_set_state(dev_handle, dev_ino,
+ HV_INTR_STATE_IDLE);
+ if (err != HV_EOK)
+ printk(KERN_ERR "sun4v_vintr_set_state(%lx,%lx,"
+ "HV_INTR_STATE_IDLE): err(%d)\n",
+ dev_handle, dev_ino, err);
}
static void run_pre_handler(unsigned int virt_irq)
{
- struct ino_bucket *bucket = virt_irq_to_bucket(virt_irq);
struct irq_handler_data *data = get_irq_chip_data(virt_irq);
+ unsigned int ino;
+ ino = virt_irq_table[virt_irq].dev_ino;
if (likely(data->pre_handler)) {
- data->pre_handler(__irq_ino(__irq(bucket)),
+ data->pre_handler(ino,
data->pre_handler_arg1,
data->pre_handler_arg2);
}
@@ -573,28 +536,6 @@ static struct irq_chip sun4v_irq = {
.set_affinity = sun4v_set_affinity,
};
-static struct irq_chip sun4v_irq_ack = {
- .typename = "sun4v+ack",
- .enable = sun4v_irq_enable,
- .disable = sun4v_irq_disable,
- .ack = run_pre_handler,
- .end = sun4v_irq_end,
- .set_affinity = sun4v_set_affinity,
-};
-
-#ifdef CONFIG_PCI_MSI
-static struct irq_chip sun4v_msi = {
- .typename = "sun4v+msi",
- .mask = mask_msi_irq,
- .unmask = unmask_msi_irq,
- .enable = sun4v_msi_enable,
- .disable = sun4v_msi_disable,
- .ack = run_pre_handler,
- .end = sun4v_irq_end,
- .set_affinity = sun4v_set_affinity,
-};
-#endif
-
static struct irq_chip sun4v_virq = {
.typename = "vsun4v",
.enable = sun4v_virq_enable,
@@ -603,59 +544,48 @@ static struct irq_chip sun4v_virq = {
.set_affinity = sun4v_virt_set_affinity,
};
-static struct irq_chip sun4v_virq_ack = {
- .typename = "vsun4v+ack",
- .enable = sun4v_virq_enable,
- .disable = sun4v_virq_disable,
- .ack = run_pre_handler,
- .end = sun4v_virq_end,
- .set_affinity = sun4v_virt_set_affinity,
-};
-
void irq_install_pre_handler(int virt_irq,
void (*func)(unsigned int, void *, void *),
void *arg1, void *arg2)
{
struct irq_handler_data *data = get_irq_chip_data(virt_irq);
- struct irq_chip *chip;
+ struct irq_chip *chip = get_irq_chip(virt_irq);
+
+ if (WARN_ON(chip == &sun4v_irq || chip == &sun4v_virq)) {
+ printk(KERN_ERR "IRQ: Trying to install pre-handler on "
+ "sun4v irq %u\n", virt_irq);
+ return;
+ }
data->pre_handler = func;
data->pre_handler_arg1 = arg1;
data->pre_handler_arg2 = arg2;
- chip = get_irq_chip(virt_irq);
- if (chip == &sun4u_irq_ack ||
- chip == &sun4v_irq_ack ||
- chip == &sun4v_virq_ack
-#ifdef CONFIG_PCI_MSI
- || chip == &sun4v_msi
-#endif
- )
+ if (chip == &sun4u_irq_ack)
return;
- chip = (chip == &sun4u_irq ?
- &sun4u_irq_ack :
- (chip == &sun4v_irq ?
- &sun4v_irq_ack : &sun4v_virq_ack));
- set_irq_chip(virt_irq, chip);
+ set_irq_chip(virt_irq, &sun4u_irq_ack);
}
unsigned int build_irq(int inofixup, unsigned long iclr, unsigned long imap)
{
struct ino_bucket *bucket;
struct irq_handler_data *data;
+ unsigned int virt_irq;
int ino;
BUG_ON(tlb_type == hypervisor);
ino = (upa_readq(imap) & (IMAP_IGN | IMAP_INO)) + inofixup;
bucket = &ivector_table[ino];
- if (!bucket->virt_irq) {
- bucket->virt_irq = virt_irq_alloc(__irq(bucket));
- set_irq_chip(bucket->virt_irq, &sun4u_irq);
+ virt_irq = bucket_get_virt_irq(__pa(bucket));
+ if (!virt_irq) {
+ virt_irq = virt_irq_alloc(0, ino);
+ bucket_set_virt_irq(__pa(bucket), virt_irq);
+ set_irq_chip(virt_irq, &sun4u_irq);
}
- data = get_irq_chip_data(bucket->virt_irq);
+ data = get_irq_chip_data(virt_irq);
if (unlikely(data))
goto out;
@@ -664,13 +594,13 @@ unsigned int build_irq(int inofixup, unsigned long iclr, unsigned long imap)
prom_printf("IRQ: kzalloc(irq_handler_data) failed.\n");
prom_halt();
}
- set_irq_chip_data(bucket->virt_irq, data);
+ set_irq_chip_data(virt_irq, data);
data->imap = imap;
data->iclr = iclr;
out:
- return bucket->virt_irq;
+ return virt_irq;
}
static unsigned int sun4v_build_common(unsigned long sysino,
@@ -678,16 +608,19 @@ static unsigned int sun4v_build_common(unsigned long sysino,
{
struct ino_bucket *bucket;
struct irq_handler_data *data;
+ unsigned int virt_irq;
BUG_ON(tlb_type != hypervisor);
bucket = &ivector_table[sysino];
- if (!bucket->virt_irq) {
- bucket->virt_irq = virt_irq_alloc(__irq(bucket));
- set_irq_chip(bucket->virt_irq, chip);
+ virt_irq = bucket_get_virt_irq(__pa(bucket));
+ if (!virt_irq) {
+ virt_irq = virt_irq_alloc(0, sysino);
+ bucket_set_virt_irq(__pa(bucket), virt_irq);
+ set_irq_chip(virt_irq, chip);
}
- data = get_irq_chip_data(bucket->virt_irq);
+ data = get_irq_chip_data(virt_irq);
if (unlikely(data))
goto out;
@@ -696,7 +629,7 @@ static unsigned int sun4v_build_common(unsigned long sysino,
prom_printf("IRQ: kzalloc(irq_handler_data) failed.\n");
prom_halt();
}
- set_irq_chip_data(bucket->virt_irq, data);
+ set_irq_chip_data(virt_irq, data);
/* Catch accidental accesses to these things. IMAP/ICLR handling
* is done by hypervisor calls on sun4v platforms, not by direct
@@ -706,7 +639,7 @@ static unsigned int sun4v_build_common(unsigned long sysino,
data->iclr = ~0UL;
out:
- return bucket->virt_irq;
+ return virt_irq;
}
unsigned int sun4v_build_irq(u32 devhandle, unsigned int devino)
@@ -718,86 +651,52 @@ unsigned int sun4v_build_irq(u32 devhandle, unsigned int devino)
unsigned int sun4v_build_virq(u32 devhandle, unsigned int devino)
{
- unsigned long sysino, hv_err;
- unsigned int virq;
-
- BUG_ON(devhandle & devino);
-
- sysino = devhandle | devino;
- BUG_ON(sysino & ~(IMAP_IGN | IMAP_INO));
-
- hv_err = sun4v_vintr_set_cookie(devhandle, devino, sysino);
- if (hv_err) {
- prom_printf("IRQ: Fatal, cannot set cookie for [%x:%x] "
- "err=%lu\n", devhandle, devino, hv_err);
- prom_halt();
- }
-
- virq = sun4v_build_common(sysino, &sun4v_virq);
-
- virt_to_real_irq_table[virq].dev_handle = devhandle;
- virt_to_real_irq_table[virq].dev_ino = devino;
-
- return virq;
-}
-
-#ifdef CONFIG_PCI_MSI
-unsigned int sun4v_build_msi(u32 devhandle, unsigned int *virt_irq_p,
- unsigned int msi_start, unsigned int msi_end)
-{
- struct ino_bucket *bucket;
struct irq_handler_data *data;
- unsigned long sysino;
- unsigned int devino;
-
- BUG_ON(tlb_type != hypervisor);
-
- /* Find a free devino in the given range. */
- for (devino = msi_start; devino < msi_end; devino++) {
- sysino = sun4v_devino_to_sysino(devhandle, devino);
- bucket = &ivector_table[sysino];
- if (!bucket->virt_irq)
- break;
- }
- if (devino >= msi_end)
- return -ENOSPC;
+ struct ino_bucket *bucket;
+ unsigned long hv_err, cookie;
+ unsigned int virt_irq;
- sysino = sun4v_devino_to_sysino(devhandle, devino);
- bucket = &ivector_table[sysino];
- bucket->virt_irq = virt_irq_alloc(__irq(bucket));
- *virt_irq_p = bucket->virt_irq;
- set_irq_chip(bucket->virt_irq, &sun4v_msi);
+ bucket = kzalloc(sizeof(struct ino_bucket), GFP_ATOMIC);
+ if (unlikely(!bucket))
+ return 0;
+ __flush_dcache_range((unsigned long) bucket,
+ ((unsigned long) bucket +
+ sizeof(struct ino_bucket)));
- data = get_irq_chip_data(bucket->virt_irq);
- if (unlikely(data))
- return devino;
+ virt_irq = virt_irq_alloc(devhandle, devino);
+ bucket_set_virt_irq(__pa(bucket), virt_irq);
+ set_irq_chip(virt_irq, &sun4v_virq);
data = kzalloc(sizeof(struct irq_handler_data), GFP_ATOMIC);
- if (unlikely(!data)) {
- virt_irq_free(*virt_irq_p);
- return -ENOMEM;
- }
- set_irq_chip_data(bucket->virt_irq, data);
+ if (unlikely(!data))
+ return 0;
+
+ set_irq_chip_data(virt_irq, data);
+ /* Catch accidental accesses to these things. IMAP/ICLR handling
+ * is done by hypervisor calls on sun4v platforms, not by direct
+ * register accesses.
+ */
data->imap = ~0UL;
data->iclr = ~0UL;
- return devino;
-}
+ cookie = ~__pa(bucket);
+ hv_err = sun4v_vintr_set_cookie(devhandle, devino, cookie);
+ if (hv_err) {
+ prom_printf("IRQ: Fatal, cannot set cookie for [%x:%x] "
+ "err=%lu\n", devhandle, devino, hv_err);
+ prom_halt();
+ }
-void sun4v_destroy_msi(unsigned int virt_irq)
-{
- virt_irq_free(virt_irq);
+ return virt_irq;
}
-#endif
void ack_bad_irq(unsigned int virt_irq)
{
- struct ino_bucket *bucket = virt_irq_to_bucket(virt_irq);
- unsigned int ino = 0xdeadbeef;
+ unsigned int ino = virt_irq_table[virt_irq].dev_ino;
- if (bucket)
- ino = bucket - &ivector_table[0];
+ if (!ino)
+ ino = 0xdeadbeef;
printk(KERN_CRIT "Unexpected IRQ from ino[%x] virt_irq[%u]\n",
ino, virt_irq);
@@ -805,7 +704,7 @@ void ack_bad_irq(unsigned int virt_irq)
void handler_irq(int irq, struct pt_regs *regs)
{
- struct ino_bucket *bucket;
+ unsigned long pstate, bucket_pa;
struct pt_regs *old_regs;
clear_softint(1 << irq);
@@ -813,15 +712,28 @@ void handler_irq(int irq, struct pt_regs *regs)
old_regs = set_irq_regs(regs);
irq_enter();
- /* Sliiiick... */
- bucket = __bucket(xchg32(irq_work(smp_processor_id()), 0));
- while (bucket) {
- struct ino_bucket *next = __bucket(bucket->irq_chain);
+ /* Grab an atomic snapshot of the pending IVECs. */
+ __asm__ __volatile__("rdpr %%pstate, %0\n\t"
+ "wrpr %0, %3, %%pstate\n\t"
+ "ldx [%2], %1\n\t"
+ "stx %%g0, [%2]\n\t"
+ "wrpr %0, 0x0, %%pstate\n\t"
+ : "=&r" (pstate), "=&r" (bucket_pa)
+ : "r" (irq_work_pa(smp_processor_id())),
+ "i" (PSTATE_IE)
+ : "memory");
+
+ while (bucket_pa) {
+ unsigned long next_pa;
+ unsigned int virt_irq;
- bucket->irq_chain = 0;
- __do_IRQ(bucket->virt_irq);
+ next_pa = bucket_get_chain_pa(bucket_pa);
+ virt_irq = bucket_get_virt_irq(bucket_pa);
+ bucket_clear_chain_pa(bucket_pa);
- bucket = next;
+ __do_IRQ(virt_irq);
+
+ bucket_pa = next_pa;
}
irq_exit();
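
The rewritten handler_irq() drains a per-cpu singly linked list of bucket physical addresses; the inline assembly disables interrupts around the load/clear pair so the snapshot cannot race with the trap handler appending new entries. Stripped of the physical-address ASI accesses, the drain loop looks like this (a sketch with ordinary pointers):

struct bucket_sketch {
	struct bucket_sketch *chain;
	unsigned int virt_irq;
};

static void drain_worklist(struct bucket_sketch **worklist,
			   void (*dispatch)(unsigned int))
{
	/* Load-and-clear is done with IRQs disabled in the real code. */
	struct bucket_sketch *b = *worklist;
	*worklist = NULL;

	while (b) {
		struct bucket_sketch *next = b->chain;

		b->chain = NULL;	/* bucket may now be re-queued */
		dispatch(b->virt_irq);	/* __do_IRQ() in the kernel */
		b = next;
	}
}
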
@@ -921,7 +833,7 @@ void init_irqwork_curcpu(void)
{
int cpu = hard_smp_processor_id();
- trap_block[cpu].irq_worklist = 0;
+ trap_block[cpu].irq_worklist_pa = 0UL;
}
/* Please be very careful with register_one_mondo() and
@@ -1035,9 +947,21 @@ static struct irqaction timer_irq_action = {
/* Only invoked on boot processor. */
void __init init_IRQ(void)
{
+ unsigned long size;
+
map_prom_timers();
kill_prom_timer();
- memset(&ivector_table[0], 0, sizeof(ivector_table));
+
+ size = sizeof(struct ino_bucket) * NUM_IVECS;
+ ivector_table = alloc_bootmem_low(size);
+ if (!ivector_table) {
+ prom_printf("Fatal error, cannot allocate ivector_table\n");
+ prom_halt();
+ }
+ __flush_dcache_range((unsigned long) ivector_table,
+ ((unsigned long) ivector_table) + size);
+
+ ivector_table_pa = __pa(ivector_table);
if (tlb_type == hypervisor)
sun4v_init_mondo_queues();
diff --git a/arch/sparc64/kernel/kprobes.c b/arch/sparc64/kernel/kprobes.c
index c93a15b785fa..d94f901d321e 100644
--- a/arch/sparc64/kernel/kprobes.c
+++ b/arch/sparc64/kernel/kprobes.c
@@ -42,6 +42,8 @@
DEFINE_PER_CPU(struct kprobe *, current_kprobe) = NULL;
DEFINE_PER_CPU(struct kprobe_ctlblk, kprobe_ctlblk);
+struct kretprobe_blackpoint kretprobe_blacklist[] = {{NULL, NULL}};
+
int __kprobes arch_prepare_kprobe(struct kprobe *p)
{
p->ainsn.insn[0] = *p->addr;
diff --git a/arch/sparc64/kernel/ktlb.S b/arch/sparc64/kernel/ktlb.S
index d4024ac0d619..964527d2ffa0 100644
--- a/arch/sparc64/kernel/ktlb.S
+++ b/arch/sparc64/kernel/ktlb.S
@@ -226,6 +226,15 @@ kvmap_dtlb_load:
ba,pt %xcc, sun4v_dtlb_load
mov %g5, %g3
+kvmap_vmemmap:
+ sub %g4, %g5, %g5
+ srlx %g5, 22, %g5
+ sethi %hi(vmemmap_table), %g1
+ sllx %g5, 3, %g5
+ or %g1, %lo(vmemmap_table), %g1
+ ba,pt %xcc, kvmap_dtlb_load
+ ldx [%g1 + %g5], %g5
+
kvmap_dtlb_nonlinear:
/* Catch kernel NULL pointer derefs. */
sethi %hi(PAGE_SIZE), %g5
@@ -233,6 +242,13 @@ kvmap_dtlb_nonlinear:
bleu,pn %xcc, kvmap_dtlb_longpath
nop
+ /* Do not use the TSB for vmemmap. */
+ mov (VMEMMAP_BASE >> 24), %g5
+ sllx %g5, 24, %g5
+ cmp %g4,%g5
+ bgeu,pn %xcc, kvmap_vmemmap
+ nop
+
KERN_TSB_LOOKUP_TL1(%g4, %g6, %g5, %g1, %g2, %g3, kvmap_dtlb_load)
kvmap_dtlb_tsbmiss:
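
The kvmap_vmemmap fragment above indexes vmemmap_table[] by the faulting address's offset from VMEMMAP_BASE in 4MB (1 << 22) steps, then branches back to kvmap_dtlb_load with the TTE in %g5. The same computation in C (a sketch; the table itself is populated by the mm/init.c side of this patch):

static unsigned long vmemmap_tte_lookup(unsigned long addr,
					unsigned long vmemmap_base,
					const unsigned long *vmemmap_table)
{
	unsigned long idx = (addr - vmemmap_base) >> 22;

	return vmemmap_table[idx];	/* TTE covering the 4MB chunk */
}
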
diff --git a/arch/sparc64/kernel/of_device.c b/arch/sparc64/kernel/of_device.c
index 4cc77485f536..42d779866fba 100644
--- a/arch/sparc64/kernel/of_device.c
+++ b/arch/sparc64/kernel/of_device.c
@@ -872,7 +872,10 @@ __setup("of_debug=", of_debug);
int of_register_driver(struct of_platform_driver *drv, struct bus_type *bus)
{
/* initialize common driver fields */
- drv->driver.name = drv->name;
+ if (!drv->driver.name)
+ drv->driver.name = drv->name;
+ if (!drv->driver.owner)
+ drv->driver.owner = drv->owner;
drv->driver.bus = bus;
/* register with core */
diff --git a/arch/sparc64/kernel/pci.c b/arch/sparc64/kernel/pci.c
index e8dac81d8a0d..9b808640a193 100644
--- a/arch/sparc64/kernel/pci.c
+++ b/arch/sparc64/kernel/pci.c
@@ -29,8 +29,6 @@
#include "pci_impl.h"
-unsigned long pci_memspace_mask = 0xffffffffUL;
-
#ifndef CONFIG_PCI
/* A "nop" PCI implementation. */
asmlinkage int sys_pciconfig_read(unsigned long bus, unsigned long dfn,
@@ -1066,8 +1064,8 @@ static int __pci_mmap_make_offset_bus(struct pci_dev *pdev, struct vm_area_struc
return 0;
}
-/* Adjust vm_pgoff of VMA such that it is the physical page offset corresponding
- * to the 32-bit pci bus offset for DEV requested by the user.
+/* Adjust vm_pgoff of VMA such that it is the physical page offset
+ * corresponding to the 32-bit pci bus offset for DEV requested by the user.
*
* Basically, the user finds the base address for his device which he wishes
* to mmap. They read the 32-bit value from the config space base register,
@@ -1076,21 +1074,35 @@ static int __pci_mmap_make_offset_bus(struct pci_dev *pdev, struct vm_area_struc
*
* Returns negative error code on failure, zero on success.
*/
-static int __pci_mmap_make_offset(struct pci_dev *dev, struct vm_area_struct *vma,
+static int __pci_mmap_make_offset(struct pci_dev *pdev,
+ struct vm_area_struct *vma,
enum pci_mmap_state mmap_state)
{
- unsigned long user_offset = vma->vm_pgoff << PAGE_SHIFT;
- unsigned long user32 = user_offset & pci_memspace_mask;
- unsigned long largest_base, this_base, addr32;
- int i;
+ unsigned long user_paddr, user_size;
+ int i, err;
- if ((dev->class >> 8) == PCI_CLASS_BRIDGE_HOST)
- return __pci_mmap_make_offset_bus(dev, vma, mmap_state);
+ /* First compute the physical address in vma->vm_pgoff,
+ * making sure the user offset is within range in the
+ * appropriate PCI space.
+ */
+ err = __pci_mmap_make_offset_bus(pdev, vma, mmap_state);
+ if (err)
+ return err;
+
+ /* If this is a mapping on a host bridge, any address
+ * is OK.
+ */
+ if ((pdev->class >> 8) == PCI_CLASS_BRIDGE_HOST)
+ return err;
+
+ /* Otherwise make sure it's in the range for one of the
+ * device's resources.
+ */
+ user_paddr = vma->vm_pgoff << PAGE_SHIFT;
+ user_size = vma->vm_end - vma->vm_start;
- /* Figure out which base address this is for. */
- largest_base = 0UL;
for (i = 0; i <= PCI_ROM_RESOURCE; i++) {
- struct resource *rp = &dev->resource[i];
+ struct resource *rp = &pdev->resource[i];
/* Active? */
if (!rp->flags)
@@ -1108,26 +1120,14 @@ static int __pci_mmap_make_offset(struct pci_dev *dev, struct vm_area_struct *vm
continue;
}
- this_base = rp->start;
-
- addr32 = (this_base & PAGE_MASK) & pci_memspace_mask;
-
- if (mmap_state == pci_mmap_io)
- addr32 &= 0xffffff;
-
- if (addr32 <= user32 && this_base > largest_base)
- largest_base = this_base;
+ if ((rp->start <= user_paddr) &&
+ (user_paddr + user_size) <= (rp->end + 1UL))
+ break;
}
- if (largest_base == 0UL)
+ if (i > PCI_ROM_RESOURCE)
return -EINVAL;
- /* Now construct the final physical address. */
- if (mmap_state == pci_mmap_io)
- vma->vm_pgoff = (((largest_base & ~0xffffffUL) | user32) >> PAGE_SHIFT);
- else
- vma->vm_pgoff = (((largest_base & ~(pci_memspace_mask)) | user32) >> PAGE_SHIFT);
-
return 0;
}
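
The rewritten __pci_mmap_make_offset() replaces the old largest-base reconstruction with a direct containment check: the user-supplied physical range must fall entirely within one of the device's resources. The predicate, isolated (sketch with a hypothetical resource type; end is inclusive, as in struct resource):

struct res_sketch { unsigned long start, end; };	/* end inclusive */

static int range_in_resource(const struct res_sketch *rp,
			     unsigned long user_paddr,
			     unsigned long user_size)
{
	return rp->start <= user_paddr &&
	       user_paddr + user_size <= rp->end + 1UL;
}
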
diff --git a/arch/sparc64/kernel/pci_fire.c b/arch/sparc64/kernel/pci_fire.c
index 14d67fe21ab2..fef3b37487bf 100644
--- a/arch/sparc64/kernel/pci_fire.c
+++ b/arch/sparc64/kernel/pci_fire.c
@@ -6,9 +6,12 @@
#include <linux/pci.h>
#include <linux/slab.h>
#include <linux/init.h>
+#include <linux/msi.h>
+#include <linux/irq.h>
#include <asm/oplib.h>
#include <asm/prom.h>
+#include <asm/irq.h>
#include "pci_impl.h"
@@ -84,6 +87,266 @@ static int pci_fire_pbm_iommu_init(struct pci_pbm_info *pbm)
return 0;
}
+#ifdef CONFIG_PCI_MSI
+struct pci_msiq_entry {
+ u64 word0;
+#define MSIQ_WORD0_RESV 0x8000000000000000UL
+#define MSIQ_WORD0_FMT_TYPE 0x7f00000000000000UL
+#define MSIQ_WORD0_FMT_TYPE_SHIFT 56
+#define MSIQ_WORD0_LEN 0x00ffc00000000000UL
+#define MSIQ_WORD0_LEN_SHIFT 46
+#define MSIQ_WORD0_ADDR0 0x00003fff00000000UL
+#define MSIQ_WORD0_ADDR0_SHIFT 32
+#define MSIQ_WORD0_RID 0x00000000ffff0000UL
+#define MSIQ_WORD0_RID_SHIFT 16
+#define MSIQ_WORD0_DATA0 0x000000000000ffffUL
+#define MSIQ_WORD0_DATA0_SHIFT 0
+
+#define MSIQ_TYPE_MSG 0x6
+#define MSIQ_TYPE_MSI32 0xb
+#define MSIQ_TYPE_MSI64 0xf
+
+ u64 word1;
+#define MSIQ_WORD1_ADDR1 0xffffffffffff0000UL
+#define MSIQ_WORD1_ADDR1_SHIFT 16
+#define MSIQ_WORD1_DATA1 0x000000000000ffffUL
+#define MSIQ_WORD1_DATA1_SHIFT 0
+
+ u64 resv[6];
+};
+
+/* All MSI registers are offset from pbm->pbm_regs */
+#define EVENT_QUEUE_BASE_ADDR_REG 0x010000UL
+#define EVENT_QUEUE_BASE_ADDR_ALL_ONES 0xfffc000000000000UL
+
+#define EVENT_QUEUE_CONTROL_SET(EQ) (0x011000UL + (EQ) * 0x8UL)
+#define EVENT_QUEUE_CONTROL_SET_OFLOW 0x0200000000000000UL
+#define EVENT_QUEUE_CONTROL_SET_EN 0x0000100000000000UL
+
+#define EVENT_QUEUE_CONTROL_CLEAR(EQ) (0x011200UL + (EQ) * 0x8UL)
+#define EVENT_QUEUE_CONTROL_CLEAR_OF 0x0200000000000000UL
+#define EVENT_QUEUE_CONTROL_CLEAR_E2I 0x0000800000000000UL
+#define EVENT_QUEUE_CONTROL_CLEAR_DIS 0x0000100000000000UL
+
+#define EVENT_QUEUE_STATE(EQ) (0x011400UL + (EQ) * 0x8UL)
+#define EVENT_QUEUE_STATE_MASK 0x0000000000000007UL
+#define EVENT_QUEUE_STATE_IDLE 0x0000000000000001UL
+#define EVENT_QUEUE_STATE_ACTIVE 0x0000000000000002UL
+#define EVENT_QUEUE_STATE_ERROR 0x0000000000000004UL
+
+#define EVENT_QUEUE_TAIL(EQ) (0x011600UL + (EQ) * 0x8UL)
+#define EVENT_QUEUE_TAIL_OFLOW 0x0200000000000000UL
+#define EVENT_QUEUE_TAIL_VAL 0x000000000000007fUL
+
+#define EVENT_QUEUE_HEAD(EQ) (0x011800UL + (EQ) * 0x8UL)
+#define EVENT_QUEUE_HEAD_VAL 0x000000000000007fUL
+
+#define MSI_MAP(MSI) (0x020000UL + (MSI) * 0x8UL)
+#define MSI_MAP_VALID 0x8000000000000000UL
+#define MSI_MAP_EQWR_N 0x4000000000000000UL
+#define MSI_MAP_EQNUM 0x000000000000003fUL
+
+#define MSI_CLEAR(MSI) (0x028000UL + (MSI) * 0x8UL)
+#define MSI_CLEAR_EQWR_N 0x4000000000000000UL
+
+#define IMONDO_DATA0 0x02C000UL
+#define IMONDO_DATA0_DATA 0xffffffffffffffc0UL
+
+#define IMONDO_DATA1 0x02C008UL
+#define IMONDO_DATA1_DATA 0xffffffffffffffffUL
+
+#define MSI_32BIT_ADDR 0x034000UL
+#define MSI_32BIT_ADDR_VAL 0x00000000ffff0000UL
+
+#define MSI_64BIT_ADDR 0x034008UL
+#define MSI_64BIT_ADDR_VAL 0xffffffffffff0000UL
+
+static int pci_fire_get_head(struct pci_pbm_info *pbm, unsigned long msiqid,
+ unsigned long *head)
+{
+ *head = fire_read(pbm->pbm_regs + EVENT_QUEUE_HEAD(msiqid));
+ return 0;
+}
+
+static int pci_fire_dequeue_msi(struct pci_pbm_info *pbm, unsigned long msiqid,
+ unsigned long *head, unsigned long *msi)
+{
+ unsigned long type_fmt, type, msi_num;
+ struct pci_msiq_entry *base, *ep;
+
+ base = (pbm->msi_queues + ((msiqid - pbm->msiq_first) * 8192));
+ ep = &base[*head];
+
+ if ((ep->word0 & MSIQ_WORD0_FMT_TYPE) == 0)
+ return 0;
+
+ type_fmt = ((ep->word0 & MSIQ_WORD0_FMT_TYPE) >>
+ MSIQ_WORD0_FMT_TYPE_SHIFT);
+ type = (type_fmt >> 3);
+ if (unlikely(type != MSIQ_TYPE_MSI32 &&
+ type != MSIQ_TYPE_MSI64))
+ return -EINVAL;
+
+ *msi = msi_num = ((ep->word0 & MSIQ_WORD0_DATA0) >>
+ MSIQ_WORD0_DATA0_SHIFT);
+
+ fire_write(pbm->pbm_regs + MSI_CLEAR(msi_num),
+ MSI_CLEAR_EQWR_N);
+
+ /* Clear the entry. */
+ ep->word0 &= ~MSIQ_WORD0_FMT_TYPE;
+
+ /* Go to next entry in ring. */
+ (*head)++;
+ if (*head >= pbm->msiq_ent_count)
+ *head = 0;
+
+ return 1;
+}
+
+static int pci_fire_set_head(struct pci_pbm_info *pbm, unsigned long msiqid,
+ unsigned long head)
+{
+ fire_write(pbm->pbm_regs + EVENT_QUEUE_HEAD(msiqid), head);
+ return 0;
+}
+
+static int pci_fire_msi_setup(struct pci_pbm_info *pbm, unsigned long msiqid,
+ unsigned long msi, int is_msi64)
+{
+ u64 val;
+
+ val = fire_read(pbm->pbm_regs + MSI_MAP(msi));
+ val &= ~(MSI_MAP_EQNUM);
+ val |= msiqid;
+ fire_write(pbm->pbm_regs + MSI_MAP(msi), val);
+
+ fire_write(pbm->pbm_regs + MSI_CLEAR(msi),
+ MSI_CLEAR_EQWR_N);
+
+ val = fire_read(pbm->pbm_regs + MSI_MAP(msi));
+ val |= MSI_MAP_VALID;
+ fire_write(pbm->pbm_regs + MSI_MAP(msi), val);
+
+ return 0;
+}
+
+static int pci_fire_msi_teardown(struct pci_pbm_info *pbm, unsigned long msi)
+{
+ unsigned long msiqid;
+ u64 val;
+
+ val = fire_read(pbm->pbm_regs + MSI_MAP(msi));
+ msiqid = (val & MSI_MAP_EQNUM);
+
+ val &= ~MSI_MAP_VALID;
+
+ fire_write(pbm->pbm_regs + MSI_MAP(msi), val);
+
+ return 0;
+}
+
+static int pci_fire_msiq_alloc(struct pci_pbm_info *pbm)
+{
+ unsigned long pages, order, i;
+
+ order = get_order(512 * 1024);
+ pages = __get_free_pages(GFP_KERNEL | __GFP_COMP, order);
+ if (pages == 0UL) {
+ printk(KERN_ERR "MSI: Cannot allocate MSI queues (o=%lu).\n",
+ order);
+ return -ENOMEM;
+ }
+ memset((char *)pages, 0, PAGE_SIZE << order);
+ pbm->msi_queues = (void *) pages;
+
+ fire_write(pbm->pbm_regs + EVENT_QUEUE_BASE_ADDR_REG,
+ (EVENT_QUEUE_BASE_ADDR_ALL_ONES |
+ __pa(pbm->msi_queues)));
+
+ fire_write(pbm->pbm_regs + IMONDO_DATA0,
+ pbm->portid << 6);
+ fire_write(pbm->pbm_regs + IMONDO_DATA1, 0);
+
+ fire_write(pbm->pbm_regs + MSI_32BIT_ADDR,
+ pbm->msi32_start);
+ fire_write(pbm->pbm_regs + MSI_64BIT_ADDR,
+ pbm->msi64_start);
+
+ for (i = 0; i < pbm->msiq_num; i++) {
+ fire_write(pbm->pbm_regs + EVENT_QUEUE_HEAD(i), 0);
+ fire_write(pbm->pbm_regs + EVENT_QUEUE_TAIL(i), 0);
+ }
+
+ return 0;
+}
+
+static void pci_fire_msiq_free(struct pci_pbm_info *pbm)
+{
+ unsigned long pages, order;
+
+ order = get_order(512 * 1024);
+ pages = (unsigned long) pbm->msi_queues;
+
+ free_pages(pages, order);
+
+ pbm->msi_queues = NULL;
+}
+
+static int pci_fire_msiq_build_irq(struct pci_pbm_info *pbm,
+ unsigned long msiqid,
+ unsigned long devino)
+{
+ unsigned long cregs = (unsigned long) pbm->pbm_regs;
+ unsigned long imap_reg, iclr_reg, int_ctrlr;
+ unsigned int virt_irq;
+ int fixup;
+ u64 val;
+
+ imap_reg = cregs + (0x001000UL + (devino * 0x08UL));
+ iclr_reg = cregs + (0x001400UL + (devino * 0x08UL));
+
+ /* XXX iterate amongst the 4 IRQ controllers XXX */
+ int_ctrlr = (1UL << 6);
+
+ val = fire_read(imap_reg);
+ val |= (1UL << 63) | int_ctrlr;
+ fire_write(imap_reg, val);
+
+ fixup = ((pbm->portid << 6) | devino) - int_ctrlr;
+
+ virt_irq = build_irq(fixup, iclr_reg, imap_reg);
+ if (!virt_irq)
+ return -ENOMEM;
+
+ fire_write(pbm->pbm_regs +
+ EVENT_QUEUE_CONTROL_SET(msiqid),
+ EVENT_QUEUE_CONTROL_SET_EN);
+
+ return virt_irq;
+}
+
+static const struct sparc64_msiq_ops pci_fire_msiq_ops = {
+ .get_head = pci_fire_get_head,
+ .dequeue_msi = pci_fire_dequeue_msi,
+ .set_head = pci_fire_set_head,
+ .msi_setup = pci_fire_msi_setup,
+ .msi_teardown = pci_fire_msi_teardown,
+ .msiq_alloc = pci_fire_msiq_alloc,
+ .msiq_free = pci_fire_msiq_free,
+ .msiq_build_irq = pci_fire_msiq_build_irq,
+};
+
+static void pci_fire_msi_init(struct pci_pbm_info *pbm)
+{
+ sparc64_pbm_msi_init(pbm, &pci_fire_msiq_ops);
+}
+#else /* CONFIG_PCI_MSI */
+static void pci_fire_msi_init(struct pci_pbm_info *pbm)
+{
+}
+#endif /* !(CONFIG_PCI_MSI) */
+
/* Based at pbm->controller_regs */
#define FIRE_PARITY_CONTROL 0x470010UL
#define FIRE_PARITY_ENAB 0x8000000000000000UL
@@ -176,6 +439,7 @@ static int pci_fire_pbm_init(struct pci_controller_info *p,
{
const struct linux_prom64_registers *regs;
struct pci_pbm_info *pbm;
+ int err;
if ((portid & 1) == 0)
pbm = &p->pbm_A;
@@ -208,7 +472,13 @@ static int pci_fire_pbm_init(struct pci_controller_info *p,
pci_fire_hw_init(pbm);
- return pci_fire_pbm_iommu_init(pbm);
+ err = pci_fire_pbm_iommu_init(pbm);
+ if (err)
+ return err;
+
+ pci_fire_msi_init(pbm);
+
+ return 0;
}
static inline int portid_compare(u32 x, u32 y)
@@ -249,13 +519,6 @@ void fire_pci_init(struct device_node *dp, const char *model_name)
p->pbm_B.iommu = iommu;
- /* XXX MSI support XXX */
-
- /* Like PSYCHO and SCHIZO we have a 2GB aligned area
- * for memory space.
- */
- pci_memspace_mask = 0x7fffffffUL;
-
if (pci_fire_pbm_init(p, dp, portid))
goto fatal_memory_error;
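
pci_fire_dequeue_msi() above implements the consumer side of a hardware event ring: inspect the entry at the head index, extract the MSI number, clear the entry so it can be reused, and advance the head with wraparound. The pattern in isolation (a sketch; the simplified entry layout stands in for the MSIQ_WORD0 bitfields):

struct eq_entry_sketch {
	unsigned int valid;		/* stands in for MSIQ_WORD0_FMT_TYPE */
	unsigned int msi;		/* stands in for MSIQ_WORD0_DATA0 */
};

static int eq_dequeue(struct eq_entry_sketch *ring,
		      unsigned long ent_count,
		      unsigned long *head, unsigned int *msi)
{
	struct eq_entry_sketch *ep = &ring[*head];

	if (!ep->valid)
		return 0;		/* queue drained */

	*msi = ep->msi;
	ep->valid = 0;			/* clear the entry for reuse */

	if (++(*head) >= ent_count)	/* advance head, wrap at end */
		*head = 0;
	return 1;
}
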
diff --git a/arch/sparc64/kernel/pci_impl.h b/arch/sparc64/kernel/pci_impl.h
index f660c2b685eb..4a50da13ce48 100644
--- a/arch/sparc64/kernel/pci_impl.h
+++ b/arch/sparc64/kernel/pci_impl.h
@@ -29,6 +29,33 @@
#define PCI_STC_FLUSHFLAG_SET(STC) \
(*((STC)->strbuf_flushflag) != 0UL)
+#ifdef CONFIG_PCI_MSI
+struct pci_pbm_info;
+struct sparc64_msiq_ops {
+ int (*get_head)(struct pci_pbm_info *pbm, unsigned long msiqid,
+ unsigned long *head);
+ int (*dequeue_msi)(struct pci_pbm_info *pbm, unsigned long msiqid,
+ unsigned long *head, unsigned long *msi);
+ int (*set_head)(struct pci_pbm_info *pbm, unsigned long msiqid,
+ unsigned long head);
+ int (*msi_setup)(struct pci_pbm_info *pbm, unsigned long msiqid,
+ unsigned long msi, int is_msi64);
+ int (*msi_teardown)(struct pci_pbm_info *pbm, unsigned long msi);
+ int (*msiq_alloc)(struct pci_pbm_info *pbm);
+ void (*msiq_free)(struct pci_pbm_info *pbm);
+ int (*msiq_build_irq)(struct pci_pbm_info *pbm, unsigned long msiqid,
+ unsigned long devino);
+};
+
+extern void sparc64_pbm_msi_init(struct pci_pbm_info *pbm,
+ const struct sparc64_msiq_ops *ops);
+
+struct sparc64_msiq_cookie {
+ struct pci_pbm_info *pbm;
+ unsigned long msiqid;
+};
+#endif
+
struct pci_controller_info;
struct pci_pbm_info {
@@ -90,6 +117,8 @@ struct pci_pbm_info {
u32 msiq_ent_count;
u32 msiq_first;
u32 msiq_first_devino;
+ u32 msiq_rotor;
+ struct sparc64_msiq_cookie *msiq_irq_cookies;
u32 msi_num;
u32 msi_first;
u32 msi_data_mask;
@@ -100,9 +129,11 @@ struct pci_pbm_info {
u32 msi64_len;
void *msi_queues;
unsigned long *msi_bitmap;
+ unsigned int *msi_irq_table;
int (*setup_msi_irq)(unsigned int *virt_irq_p, struct pci_dev *pdev,
struct msi_desc *entry);
void (*teardown_msi_irq)(unsigned int virt_irq, struct pci_dev *pdev);
+ const struct sparc64_msiq_ops *msi_ops;
#endif /* !(CONFIG_PCI_MSI) */
/* This PBM's streaming buffer. */
@@ -126,7 +157,6 @@ struct pci_controller_info {
};
extern struct pci_pbm_info *pci_pbm_root;
-extern unsigned long pci_memspace_mask;
extern int pci_num_pbms;
diff --git a/arch/sparc64/kernel/pci_msi.c b/arch/sparc64/kernel/pci_msi.c
new file mode 100644
index 000000000000..31a165fd3e48
--- /dev/null
+++ b/arch/sparc64/kernel/pci_msi.c
@@ -0,0 +1,433 @@
+/* pci_msi.c: Sparc64 MSI support common layer.
+ *
+ * Copyright (C) 2007 David S. Miller (davem@davemloft.net)
+ */
+#include <linux/kernel.h>
+#include <linux/interrupt.h>
+#include <linux/irq.h>
+
+#include "pci_impl.h"
+
+static irqreturn_t sparc64_msiq_interrupt(int irq, void *cookie)
+{
+ struct sparc64_msiq_cookie *msiq_cookie = cookie;
+ struct pci_pbm_info *pbm = msiq_cookie->pbm;
+ unsigned long msiqid = msiq_cookie->msiqid;
+ const struct sparc64_msiq_ops *ops;
+ unsigned long orig_head, head;
+ int err;
+
+ ops = pbm->msi_ops;
+
+ err = ops->get_head(pbm, msiqid, &head);
+ if (unlikely(err < 0))
+ goto err_get_head;
+
+ orig_head = head;
+ for (;;) {
+ unsigned long msi;
+
+ err = ops->dequeue_msi(pbm, msiqid, &head, &msi);
+ if (likely(err > 0))
+ __do_IRQ(pbm->msi_irq_table[msi - pbm->msi_first]);
+
+ if (unlikely(err < 0))
+ goto err_dequeue;
+
+ if (err == 0)
+ break;
+ }
+ if (likely(head != orig_head)) {
+ err = ops->set_head(pbm, msiqid, head);
+ if (unlikely(err < 0))
+ goto err_set_head;
+ }
+ return IRQ_HANDLED;
+
+err_get_head:
+ printk(KERN_EMERG "MSI: Get head on msiqid[%lu] gives error %d\n",
+ msiqid, err);
+ goto err_out;
+
+err_dequeue:
+ printk(KERN_EMERG "MSI: Dequeue head[%lu] from msiqid[%lu] "
+ "gives error %d\n",
+ head, msiqid, err);
+ goto err_out;
+
+err_set_head:
+ printk(KERN_EMERG "MSI: Set head[%lu] on msiqid[%lu] "
+ "gives error %d\n",
+ head, msiqid, err);
+ goto err_out;
+
+err_out:
+ return IRQ_NONE;
+}
+
+static u32 pick_msiq(struct pci_pbm_info *pbm)
+{
+ static DEFINE_SPINLOCK(rotor_lock);
+ unsigned long flags;
+ u32 ret, rotor;
+
+ spin_lock_irqsave(&rotor_lock, flags);
+
+ rotor = pbm->msiq_rotor;
+ ret = pbm->msiq_first + rotor;
+
+ if (++rotor >= pbm->msiq_num)
+ rotor = 0;
+ pbm->msiq_rotor = rotor;
+
+ spin_unlock_irqrestore(&rotor_lock, flags);
+
+ return ret;
+}
+
+
+static int alloc_msi(struct pci_pbm_info *pbm)
+{
+ int i;
+
+ for (i = 0; i < pbm->msi_num; i++) {
+ if (!test_and_set_bit(i, pbm->msi_bitmap))
+ return i + pbm->msi_first;
+ }
+
+ return -ENOENT;
+}
+
+static void free_msi(struct pci_pbm_info *pbm, int msi_num)
+{
+ msi_num -= pbm->msi_first;
+ clear_bit(msi_num, pbm->msi_bitmap);
+}
+
+static struct irq_chip msi_irq = {
+ .typename = "PCI-MSI",
+ .mask = mask_msi_irq,
+ .unmask = unmask_msi_irq,
+ .enable = unmask_msi_irq,
+ .disable = mask_msi_irq,
+ /* XXX affinity XXX */
+};
+
+int sparc64_setup_msi_irq(unsigned int *virt_irq_p,
+ struct pci_dev *pdev,
+ struct msi_desc *entry)
+{
+ struct pci_pbm_info *pbm = pdev->dev.archdata.host_controller;
+ const struct sparc64_msiq_ops *ops = pbm->msi_ops;
+ struct msi_msg msg;
+ int msi, err;
+ u32 msiqid;
+
+ *virt_irq_p = virt_irq_alloc(0, 0);
+ err = -ENOMEM;
+ if (!*virt_irq_p)
+ goto out_err;
+
+ set_irq_chip(*virt_irq_p, &msi_irq);
+
+ err = alloc_msi(pbm);
+ if (unlikely(err < 0))
+ goto out_virt_irq_free;
+
+ msi = err;
+
+ msiqid = pick_msiq(pbm);
+
+ err = ops->msi_setup(pbm, msiqid, msi,
+ (entry->msi_attrib.is_64 ? 1 : 0));
+ if (err)
+ goto out_msi_free;
+
+ pbm->msi_irq_table[msi - pbm->msi_first] = *virt_irq_p;
+
+ if (entry->msi_attrib.is_64) {
+ msg.address_hi = pbm->msi64_start >> 32;
+ msg.address_lo = pbm->msi64_start & 0xffffffff;
+ } else {
+ msg.address_hi = 0;
+ msg.address_lo = pbm->msi32_start;
+ }
+ msg.data = msi;
+
+ set_irq_msi(*virt_irq_p, entry);
+ write_msi_msg(*virt_irq_p, &msg);
+
+ return 0;
+
+out_msi_free:
+ free_msi(pbm, msi);
+
+out_virt_irq_free:
+ set_irq_chip(*virt_irq_p, NULL);
+ virt_irq_free(*virt_irq_p);
+ *virt_irq_p = 0;
+
+out_err:
+ return err;
+}
+
+void sparc64_teardown_msi_irq(unsigned int virt_irq,
+ struct pci_dev *pdev)
+{
+ struct pci_pbm_info *pbm = pdev->dev.archdata.host_controller;
+ const struct sparc64_msiq_ops *ops = pbm->msi_ops;
+ unsigned int msi_num;
+ int i, err;
+
+ for (i = 0; i < pbm->msi_num; i++) {
+ if (pbm->msi_irq_table[i] == virt_irq)
+ break;
+ }
+ if (i >= pbm->msi_num) {
+ printk(KERN_ERR "%s: teardown: No MSI for irq %u\n",
+ pbm->name, virt_irq);
+ return;
+ }
+
+ msi_num = pbm->msi_first + i;
+ pbm->msi_irq_table[i] = ~0U;
+
+ err = ops->msi_teardown(pbm, msi_num);
+ if (err) {
+ printk(KERN_ERR "%s: teardown: ops->teardown() on MSI %u, "
+ "irq %u, gives error %d\n",
+ pbm->name, msi_num, virt_irq, err);
+ return;
+ }
+
+ free_msi(pbm, msi_num);
+
+ set_irq_chip(virt_irq, NULL);
+ virt_irq_free(virt_irq);
+}
+
+static int msi_bitmap_alloc(struct pci_pbm_info *pbm)
+{
+ unsigned long size, bits_per_ulong;
+
+ bits_per_ulong = sizeof(unsigned long) * 8;
+ size = (pbm->msi_num + (bits_per_ulong - 1)) & ~(bits_per_ulong - 1);
+ size /= 8;
+ BUG_ON(size % sizeof(unsigned long));
+
+ pbm->msi_bitmap = kzalloc(size, GFP_KERNEL);
+ if (!pbm->msi_bitmap)
+ return -ENOMEM;
+
+ return 0;
+}
+
+static void msi_bitmap_free(struct pci_pbm_info *pbm)
+{
+ kfree(pbm->msi_bitmap);
+ pbm->msi_bitmap = NULL;
+}
+
+static int msi_table_alloc(struct pci_pbm_info *pbm)
+{
+ int size, i;
+
+ size = pbm->msiq_num * sizeof(struct sparc64_msiq_cookie);
+ pbm->msiq_irq_cookies = kzalloc(size, GFP_KERNEL);
+ if (!pbm->msiq_irq_cookies)
+ return -ENOMEM;
+
+ for (i = 0; i < pbm->msiq_num; i++) {
+ struct sparc64_msiq_cookie *p;
+
+ p = &pbm->msiq_irq_cookies[i];
+ p->pbm = pbm;
+ p->msiqid = pbm->msiq_first + i;
+ }
+
+ size = pbm->msi_num * sizeof(unsigned int);
+ pbm->msi_irq_table = kzalloc(size, GFP_KERNEL);
+ if (!pbm->msi_irq_table) {
+ kfree(pbm->msiq_irq_cookies);
+ pbm->msiq_irq_cookies = NULL;
+ return -ENOMEM;
+ }
+
+ return 0;
+}
+
+static void msi_table_free(struct pci_pbm_info *pbm)
+{
+ kfree(pbm->msiq_irq_cookies);
+ pbm->msiq_irq_cookies = NULL;
+
+ kfree(pbm->msi_irq_table);
+ pbm->msi_irq_table = NULL;
+}
+
+static int bringup_one_msi_queue(struct pci_pbm_info *pbm,
+ const struct sparc64_msiq_ops *ops,
+ unsigned long msiqid,
+ unsigned long devino)
+{
+ int irq = ops->msiq_build_irq(pbm, msiqid, devino);
+ int err;
+
+ if (irq < 0)
+ return irq;
+
+ err = request_irq(irq, sparc64_msiq_interrupt, 0,
+ "MSIQ",
+ &pbm->msiq_irq_cookies[msiqid - pbm->msiq_first]);
+ if (err)
+ return err;
+
+ return 0;
+}
+
+static int sparc64_bringup_msi_queues(struct pci_pbm_info *pbm,
+ const struct sparc64_msiq_ops *ops)
+{
+ int i;
+
+ for (i = 0; i < pbm->msiq_num; i++) {
+ unsigned long msiqid = i + pbm->msiq_first;
+ unsigned long devino = i + pbm->msiq_first_devino;
+ int err;
+
+ err = bringup_one_msi_queue(pbm, ops, msiqid, devino);
+ if (err)
+ return err;
+ }
+
+ return 0;
+}
+
+void sparc64_pbm_msi_init(struct pci_pbm_info *pbm,
+ const struct sparc64_msiq_ops *ops)
+{
+ const u32 *val;
+ int len;
+
+ val = of_get_property(pbm->prom_node, "#msi-eqs", &len);
+ if (!val || len != 4)
+ goto no_msi;
+ pbm->msiq_num = *val;
+ if (pbm->msiq_num) {
+ const struct msiq_prop {
+ u32 first_msiq;
+ u32 num_msiq;
+ u32 first_devino;
+ } *mqp;
+ const struct msi_range_prop {
+ u32 first_msi;
+ u32 num_msi;
+ } *mrng;
+ const struct addr_range_prop {
+ u32 msi32_high;
+ u32 msi32_low;
+ u32 msi32_len;
+ u32 msi64_high;
+ u32 msi64_low;
+ u32 msi64_len;
+ } *arng;
+
+ val = of_get_property(pbm->prom_node, "msi-eq-size", &len);
+ if (!val || len != 4)
+ goto no_msi;
+
+ pbm->msiq_ent_count = *val;
+
+ mqp = of_get_property(pbm->prom_node,
+ "msi-eq-to-devino", &len);
+ if (!mqp)
+ mqp = of_get_property(pbm->prom_node,
+ "msi-eq-devino", &len);
+ if (!mqp || len != sizeof(struct msiq_prop))
+ goto no_msi;
+
+ pbm->msiq_first = mqp->first_msiq;
+ pbm->msiq_first_devino = mqp->first_devino;
+
+ val = of_get_property(pbm->prom_node, "#msi", &len);
+ if (!val || len != 4)
+ goto no_msi;
+ pbm->msi_num = *val;
+
+ mrng = of_get_property(pbm->prom_node, "msi-ranges", &len);
+ if (!mrng || len != sizeof(struct msi_range_prop))
+ goto no_msi;
+ pbm->msi_first = mrng->first_msi;
+
+ val = of_get_property(pbm->prom_node, "msi-data-mask", &len);
+ if (!val || len != 4)
+ goto no_msi;
+ pbm->msi_data_mask = *val;
+
+ val = of_get_property(pbm->prom_node, "msix-data-width", &len);
+ if (!val || len != 4)
+ goto no_msi;
+ pbm->msix_data_width = *val;
+
+ arng = of_get_property(pbm->prom_node, "msi-address-ranges",
+ &len);
+ if (!arng || len != sizeof(struct addr_range_prop))
+ goto no_msi;
+ pbm->msi32_start = ((u64)arng->msi32_high << 32) |
+ (u64) arng->msi32_low;
+ pbm->msi64_start = ((u64)arng->msi64_high << 32) |
+ (u64) arng->msi64_low;
+ pbm->msi32_len = arng->msi32_len;
+ pbm->msi64_len = arng->msi64_len;
+
+ if (msi_bitmap_alloc(pbm))
+ goto no_msi;
+
+ if (msi_table_alloc(pbm)) {
+ msi_bitmap_free(pbm);
+ goto no_msi;
+ }
+
+ if (ops->msiq_alloc(pbm)) {
+ msi_table_free(pbm);
+ msi_bitmap_free(pbm);
+ goto no_msi;
+ }
+
+ if (sparc64_bringup_msi_queues(pbm, ops)) {
+ ops->msiq_free(pbm);
+ msi_table_free(pbm);
+ msi_bitmap_free(pbm);
+ goto no_msi;
+ }
+
+ printk(KERN_INFO "%s: MSI Queue first[%u] num[%u] count[%u] "
+ "devino[0x%x]\n",
+ pbm->name,
+ pbm->msiq_first, pbm->msiq_num,
+ pbm->msiq_ent_count,
+ pbm->msiq_first_devino);
+ printk(KERN_INFO "%s: MSI first[%u] num[%u] mask[0x%x] "
+ "width[%u]\n",
+ pbm->name,
+ pbm->msi_first, pbm->msi_num, pbm->msi_data_mask,
+ pbm->msix_data_width);
+ printk(KERN_INFO "%s: MSI addr32[0x%lx:0x%x] "
+ "addr64[0x%lx:0x%x]\n",
+ pbm->name,
+ pbm->msi32_start, pbm->msi32_len,
+ pbm->msi64_start, pbm->msi64_len);
+ printk(KERN_INFO "%s: MSI queues at RA [%016lx]\n",
+ pbm->name,
+ __pa(pbm->msi_queues));
+
+ pbm->msi_ops = ops;
+ pbm->setup_msi_irq = sparc64_setup_msi_irq;
+ pbm->teardown_msi_irq = sparc64_teardown_msi_irq;
+ }
+ return;
+
+no_msi:
+ pbm->msiq_num = 0;
+ printk(KERN_INFO "%s: No MSI support.\n", pbm->name);
+}
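
The new pci_msi.c above sizes its allocation bitmap by rounding the MSI count up to a whole number of unsigned longs before converting bits to bytes (msi_bitmap_alloc), and alloc_msi()/free_msi() then hand out IDs relative to msi_first. A minimal user-space sketch of that sizing arithmetic; the msi_num value is an assumed example:

	#include <stdio.h>

	int main(void)
	{
		unsigned long msi_num = 197;	/* assumed example count */
		unsigned long bits_per_ulong = sizeof(unsigned long) * 8;
		unsigned long size;

		/* Round up to a whole number of bitmap words, then
		 * bits -> bytes, exactly as msi_bitmap_alloc() does above.
		 */
		size = (msi_num + (bits_per_ulong - 1)) & ~(bits_per_ulong - 1);
		size /= 8;

		printf("%lu MSIs -> %lu-byte bitmap (%lu words)\n",
		       msi_num, size, size / sizeof(unsigned long));
		return 0;
	}
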
diff --git a/arch/sparc64/kernel/pci_psycho.c b/arch/sparc64/kernel/pci_psycho.c
index b6b4cfea5b5f..d27ee5d528a2 100644
--- a/arch/sparc64/kernel/pci_psycho.c
+++ b/arch/sparc64/kernel/pci_psycho.c
@@ -1058,12 +1058,6 @@ void psycho_init(struct device_node *dp, char *model_name)
p->pbm_A.config_space = p->pbm_B.config_space =
(pr_regs[2].phys_addr + PSYCHO_CONFIGSPACE);
- /*
- * Psycho's PCI MEM space is mapped to a 2GB aligned area, so
- * we need to adjust our MEM space mask.
- */
- pci_memspace_mask = 0x7fffffffUL;
-
psycho_controller_hwinit(&p->pbm_A);
if (psycho_iommu_init(&p->pbm_A))
diff --git a/arch/sparc64/kernel/pci_schizo.c b/arch/sparc64/kernel/pci_schizo.c
index 3c30bfa1f3a3..9546ba9f5dee 100644
--- a/arch/sparc64/kernel/pci_schizo.c
+++ b/arch/sparc64/kernel/pci_schizo.c
@@ -1464,9 +1464,6 @@ static void __schizo_init(struct device_node *dp, char *model_name, int chip_typ
p->pbm_B.iommu = iommu;
- /* Like PSYCHO we have a 2GB aligned area for memory space. */
- pci_memspace_mask = 0x7fffffffUL;
-
if (schizo_pbm_init(p, dp, portid, chip_type))
goto fatal_memory_error;
diff --git a/arch/sparc64/kernel/pci_sun4v.c b/arch/sparc64/kernel/pci_sun4v.c
index da724b13e89e..cacacfae5451 100644
--- a/arch/sparc64/kernel/pci_sun4v.c
+++ b/arch/sparc64/kernel/pci_sun4v.c
@@ -13,6 +13,7 @@
#include <linux/irq.h>
#include <linux/msi.h>
#include <linux/log2.h>
+#include <linux/scatterlist.h>
#include <asm/iommu.h>
#include <asm/irq.h>
@@ -373,7 +374,7 @@ static inline long fill_sg(long entry, struct device *dev,
int nused, int nelems, unsigned long prot)
{
struct scatterlist *dma_sg = sg;
- struct scatterlist *sg_end = sg + nelems;
+ struct scatterlist *sg_end = sg_last(sg, nelems);
unsigned long flags;
int i;
@@ -413,7 +414,7 @@ static inline long fill_sg(long entry, struct device *dev,
len -= (IO_PAGE_SIZE - (tmp & (IO_PAGE_SIZE - 1UL)));
break;
}
- sg++;
+ sg = sg_next(sg);
}
pteval = (pteval & IOPTE_PAGE);
@@ -431,24 +432,25 @@ static inline long fill_sg(long entry, struct device *dev,
}
pteval = (pteval & IOPTE_PAGE) + len;
- sg++;
+ sg = sg_next(sg);
/* Skip over any tail mappings we've fully mapped,
* adjusting pteval along the way. Stop when we
* detect a page crossing event.
*/
- while (sg < sg_end &&
- (pteval << (64 - IO_PAGE_SHIFT)) != 0UL &&
+ while ((pteval << (64 - IO_PAGE_SHIFT)) != 0UL &&
(pteval == SG_ENT_PHYS_ADDRESS(sg)) &&
((pteval ^
(SG_ENT_PHYS_ADDRESS(sg) + sg->length - 1UL)) >> IO_PAGE_SHIFT) == 0UL) {
pteval += sg->length;
- sg++;
+ if (sg == sg_end)
+ break;
+ sg = sg_next(sg);
}
if ((pteval << (64 - IO_PAGE_SHIFT)) == 0UL)
pteval = ~0UL;
} while (dma_npages != 0);
- dma_sg++;
+ dma_sg = sg_next(dma_sg);
}
if (unlikely(iommu_batch_end() < 0L))
@@ -510,7 +512,7 @@ static int dma_4v_map_sg(struct device *dev, struct scatterlist *sglist,
sgtmp = sglist;
while (used && sgtmp->dma_length) {
sgtmp->dma_address += dma_base;
- sgtmp++;
+ sgtmp = sg_next(sgtmp);
used--;
}
used = nelems - used;
@@ -545,6 +547,7 @@ static void dma_4v_unmap_sg(struct device *dev, struct scatterlist *sglist,
struct pci_pbm_info *pbm;
struct iommu *iommu;
unsigned long flags, i, npages;
+ struct scatterlist *sg, *sgprv;
long entry;
u32 devhandle, bus_addr;
@@ -558,12 +561,15 @@ static void dma_4v_unmap_sg(struct device *dev, struct scatterlist *sglist,
devhandle = pbm->devhandle;
bus_addr = sglist->dma_address & IO_PAGE_MASK;
-
- for (i = 1; i < nelems; i++)
- if (sglist[i].dma_length == 0)
+ sgprv = NULL;
+ for_each_sg(sglist, sg, nelems, i) {
+ if (sg->dma_length == 0)
break;
- i--;
- npages = (IO_PAGE_ALIGN(sglist[i].dma_address + sglist[i].dma_length) -
+
+ sgprv = sg;
+ }
+
+ npages = (IO_PAGE_ALIGN(sgprv->dma_address + sgprv->dma_length) -
bus_addr) >> IO_PAGE_SHIFT;
entry = ((bus_addr - iommu->page_table_map_base) >> IO_PAGE_SHIFT);
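
The dma_4v_unmap_sg() hunk above replaces sglist[i] array indexing with a for_each_sg() walk, since chained scatterlists are no longer guaranteed to be one contiguous array. A user-space mock of the pattern; the struct layout and macros are simplified stand-ins (the real list chains through the low bits of page_link, not a next pointer):

	#include <stdio.h>

	struct scatterlist {
		unsigned long dma_address;
		unsigned int dma_length;
		struct scatterlist *next;	/* mock-only chaining */
	};

	static struct scatterlist *sg_next(struct scatterlist *sg)
	{
		return sg->next;
	}

	#define for_each_sg(sglist, sg, nents, i) \
		for (i = 0, sg = (sglist); (i) < (nents); i++, sg = sg_next(sg))

	int main(void)
	{
		struct scatterlist sgs[3] = {
			{ 0x1000, 256, &sgs[1] },
			{ 0x2000, 512, &sgs[2] },
			{ 0x3000, 0, NULL },	/* dma_length == 0 ends the mapping */
		};
		struct scatterlist *sg, *sgprv = NULL;
		int i;

		/* Same shape as the unmap loop: remember the last mapped entry. */
		for_each_sg(sgs, sg, 3, i) {
			if (sg->dma_length == 0)
				break;
			sgprv = sg;
		}
		printf("last mapped entry: addr=0x%lx len=%u\n",
		       sgprv->dma_address, sgprv->dma_length);
		return 0;
	}
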
@@ -748,111 +754,102 @@ struct pci_sun4v_msiq_entry {
u64 reserved2;
};
-/* For now this just runs as a pre-handler for the real interrupt handler.
- * So we just walk through the queue and ACK all the entries, update the
- * head pointer, and return.
- *
- * In the longer term it would be nice to do something more integrated
- * wherein we can pass in some of this MSI info to the drivers. This
- * would be most useful for PCIe fabric error messages, although we could
- * invoke those directly from the loop here in order to pass the info around.
- */
-static void pci_sun4v_msi_prehandler(unsigned int ino, void *data1, void *data2)
+static int pci_sun4v_get_head(struct pci_pbm_info *pbm, unsigned long msiqid,
+ unsigned long *head)
{
- struct pci_pbm_info *pbm = data1;
- struct pci_sun4v_msiq_entry *base, *ep;
- unsigned long msiqid, orig_head, head, type, err;
-
- msiqid = (unsigned long) data2;
+ unsigned long err, limit;
- head = 0xdeadbeef;
- err = pci_sun4v_msiq_gethead(pbm->devhandle, msiqid, &head);
+ err = pci_sun4v_msiq_gethead(pbm->devhandle, msiqid, head);
if (unlikely(err))
- goto hv_error_get;
-
- if (unlikely(head >= (pbm->msiq_ent_count * sizeof(struct pci_sun4v_msiq_entry))))
- goto bad_offset;
-
- head /= sizeof(struct pci_sun4v_msiq_entry);
- orig_head = head;
- base = (pbm->msi_queues + ((msiqid - pbm->msiq_first) *
- (pbm->msiq_ent_count *
- sizeof(struct pci_sun4v_msiq_entry))));
- ep = &base[head];
- while ((ep->version_type & MSIQ_TYPE_MASK) != 0) {
- type = (ep->version_type & MSIQ_TYPE_MASK) >> MSIQ_TYPE_SHIFT;
- if (unlikely(type != MSIQ_TYPE_MSI32 &&
- type != MSIQ_TYPE_MSI64))
- goto bad_type;
-
- pci_sun4v_msi_setstate(pbm->devhandle,
- ep->msi_data /* msi_num */,
- HV_MSISTATE_IDLE);
-
- /* Clear the entry. */
- ep->version_type &= ~MSIQ_TYPE_MASK;
-
- /* Go to next entry in ring. */
- head++;
- if (head >= pbm->msiq_ent_count)
- head = 0;
- ep = &base[head];
- }
+ return -ENXIO;
- if (likely(head != orig_head)) {
- /* ACK entries by updating head pointer. */
- head *= sizeof(struct pci_sun4v_msiq_entry);
- err = pci_sun4v_msiq_sethead(pbm->devhandle, msiqid, head);
- if (unlikely(err))
- goto hv_error_set;
- }
- return;
+ limit = pbm->msiq_ent_count * sizeof(struct pci_sun4v_msiq_entry);
+ if (unlikely(*head >= limit))
+ return -EFBIG;
-hv_error_set:
- printk(KERN_EMERG "MSI: Hypervisor set head gives error %lu\n", err);
- goto hv_error_cont;
+ return 0;
+}
-hv_error_get:
- printk(KERN_EMERG "MSI: Hypervisor get head gives error %lu\n", err);
+static int pci_sun4v_dequeue_msi(struct pci_pbm_info *pbm,
+ unsigned long msiqid, unsigned long *head,
+ unsigned long *msi)
+{
+ struct pci_sun4v_msiq_entry *ep;
+ unsigned long err, type;
-hv_error_cont:
- printk(KERN_EMERG "MSI: devhandle[%x] msiqid[%lx] head[%lu]\n",
- pbm->devhandle, msiqid, head);
- return;
+ /* Note: void pointer arithmetic, 'head' is a byte offset */
+ ep = (pbm->msi_queues + ((msiqid - pbm->msiq_first) *
+ (pbm->msiq_ent_count *
+ sizeof(struct pci_sun4v_msiq_entry))) +
+ *head);
-bad_offset:
- printk(KERN_EMERG "MSI: Hypervisor gives bad offset %lx max(%lx)\n",
- head, pbm->msiq_ent_count * sizeof(struct pci_sun4v_msiq_entry));
- return;
+ if ((ep->version_type & MSIQ_TYPE_MASK) == 0)
+ return 0;
-bad_type:
- printk(KERN_EMERG "MSI: Entry has bad type %lx\n", type);
- return;
+ type = (ep->version_type & MSIQ_TYPE_MASK) >> MSIQ_TYPE_SHIFT;
+ if (unlikely(type != MSIQ_TYPE_MSI32 &&
+ type != MSIQ_TYPE_MSI64))
+ return -EINVAL;
+
+ *msi = ep->msi_data;
+
+ err = pci_sun4v_msi_setstate(pbm->devhandle,
+ ep->msi_data /* msi_num */,
+ HV_MSISTATE_IDLE);
+ if (unlikely(err))
+ return -ENXIO;
+
+ /* Clear the entry. */
+ ep->version_type &= ~MSIQ_TYPE_MASK;
+
+ (*head) += sizeof(struct pci_sun4v_msiq_entry);
+ if (*head >=
+ (pbm->msiq_ent_count * sizeof(struct pci_sun4v_msiq_entry)))
+ *head = 0;
+
+ return 1;
}
-static int msi_bitmap_alloc(struct pci_pbm_info *pbm)
+static int pci_sun4v_set_head(struct pci_pbm_info *pbm, unsigned long msiqid,
+ unsigned long head)
{
- unsigned long size, bits_per_ulong;
+ unsigned long err;
- bits_per_ulong = sizeof(unsigned long) * 8;
- size = (pbm->msi_num + (bits_per_ulong - 1)) & ~(bits_per_ulong - 1);
- size /= 8;
- BUG_ON(size % sizeof(unsigned long));
+ err = pci_sun4v_msiq_sethead(pbm->devhandle, msiqid, head);
+ if (unlikely(err))
+ return -EINVAL;
- pbm->msi_bitmap = kzalloc(size, GFP_KERNEL);
- if (!pbm->msi_bitmap)
- return -ENOMEM;
+ return 0;
+}
+static int pci_sun4v_msi_setup(struct pci_pbm_info *pbm, unsigned long msiqid,
+ unsigned long msi, int is_msi64)
+{
+ if (pci_sun4v_msi_setmsiq(pbm->devhandle, msi, msiqid,
+ (is_msi64 ?
+ HV_MSITYPE_MSI64 : HV_MSITYPE_MSI32)))
+ return -ENXIO;
+ if (pci_sun4v_msi_setstate(pbm->devhandle, msi, HV_MSISTATE_IDLE))
+ return -ENXIO;
+ if (pci_sun4v_msi_setvalid(pbm->devhandle, msi, HV_MSIVALID_VALID))
+ return -ENXIO;
return 0;
}
-static void msi_bitmap_free(struct pci_pbm_info *pbm)
+static int pci_sun4v_msi_teardown(struct pci_pbm_info *pbm, unsigned long msi)
{
- kfree(pbm->msi_bitmap);
- pbm->msi_bitmap = NULL;
+ unsigned long err, msiqid;
+
+ err = pci_sun4v_msi_getmsiq(pbm->devhandle, msi, &msiqid);
+ if (err)
+ return -ENXIO;
+
+ pci_sun4v_msi_setvalid(pbm->devhandle, msi, HV_MSIVALID_INVALID);
+
+ return 0;
}
-static int msi_queue_alloc(struct pci_pbm_info *pbm)
+static int pci_sun4v_msiq_alloc(struct pci_pbm_info *pbm)
{
unsigned long q_size, alloc_size, pages, order;
int i;
@@ -906,232 +903,59 @@ h_error:
return -EINVAL;
}
-
-static int alloc_msi(struct pci_pbm_info *pbm)
+static void pci_sun4v_msiq_free(struct pci_pbm_info *pbm)
{
+ unsigned long q_size, alloc_size, pages, order;
int i;
- for (i = 0; i < pbm->msi_num; i++) {
- if (!test_and_set_bit(i, pbm->msi_bitmap))
- return i + pbm->msi_first;
- }
-
- return -ENOENT;
-}
-
-static void free_msi(struct pci_pbm_info *pbm, int msi_num)
-{
- msi_num -= pbm->msi_first;
- clear_bit(msi_num, pbm->msi_bitmap);
-}
-
-static int pci_sun4v_setup_msi_irq(unsigned int *virt_irq_p,
- struct pci_dev *pdev,
- struct msi_desc *entry)
-{
- struct pci_pbm_info *pbm = pdev->dev.archdata.host_controller;
- unsigned long devino, msiqid;
- struct msi_msg msg;
- int msi_num, err;
-
- *virt_irq_p = 0;
-
- msi_num = alloc_msi(pbm);
- if (msi_num < 0)
- return msi_num;
-
- err = sun4v_build_msi(pbm->devhandle, virt_irq_p,
- pbm->msiq_first_devino,
- (pbm->msiq_first_devino +
- pbm->msiq_num));
- if (err < 0)
- goto out_err;
- devino = err;
-
- msiqid = ((devino - pbm->msiq_first_devino) +
- pbm->msiq_first);
-
- err = -EINVAL;
- if (pci_sun4v_msiq_setstate(pbm->devhandle, msiqid, HV_MSIQSTATE_IDLE))
- if (err)
- goto out_err;
-
- if (pci_sun4v_msiq_setvalid(pbm->devhandle, msiqid, HV_MSIQ_VALID))
- goto out_err;
-
- if (pci_sun4v_msi_setmsiq(pbm->devhandle,
- msi_num, msiqid,
- (entry->msi_attrib.is_64 ?
- HV_MSITYPE_MSI64 : HV_MSITYPE_MSI32)))
- goto out_err;
-
- if (pci_sun4v_msi_setstate(pbm->devhandle, msi_num, HV_MSISTATE_IDLE))
- goto out_err;
-
- if (pci_sun4v_msi_setvalid(pbm->devhandle, msi_num, HV_MSIVALID_VALID))
- goto out_err;
-
- sparc64_set_msi(*virt_irq_p, msi_num);
+ for (i = 0; i < pbm->msiq_num; i++) {
+ unsigned long msiqid = pbm->msiq_first + i;
- if (entry->msi_attrib.is_64) {
- msg.address_hi = pbm->msi64_start >> 32;
- msg.address_lo = pbm->msi64_start & 0xffffffff;
- } else {
- msg.address_hi = 0;
- msg.address_lo = pbm->msi32_start;
+ (void) pci_sun4v_msiq_conf(pbm->devhandle, msiqid, 0UL, 0);
}
- msg.data = msi_num;
- set_irq_msi(*virt_irq_p, entry);
- write_msi_msg(*virt_irq_p, &msg);
-
- irq_install_pre_handler(*virt_irq_p,
- pci_sun4v_msi_prehandler,
- pbm, (void *) msiqid);
+ q_size = pbm->msiq_ent_count * sizeof(struct pci_sun4v_msiq_entry);
+ alloc_size = (pbm->msiq_num * q_size);
+ order = get_order(alloc_size);
- return 0;
+ pages = (unsigned long) pbm->msi_queues;
-out_err:
- free_msi(pbm, msi_num);
- return err;
+ free_pages(pages, order);
+ pbm->msi_queues = NULL;
}
-static void pci_sun4v_teardown_msi_irq(unsigned int virt_irq,
- struct pci_dev *pdev)
+static int pci_sun4v_msiq_build_irq(struct pci_pbm_info *pbm,
+ unsigned long msiqid,
+ unsigned long devino)
{
- struct pci_pbm_info *pbm = pdev->dev.archdata.host_controller;
- unsigned long msiqid, err;
- unsigned int msi_num;
-
- msi_num = sparc64_get_msi(virt_irq);
- err = pci_sun4v_msi_getmsiq(pbm->devhandle, msi_num, &msiqid);
- if (err) {
- printk(KERN_ERR "%s: getmsiq gives error %lu\n",
- pbm->name, err);
- return;
- }
+ unsigned int virt_irq = sun4v_build_irq(pbm->devhandle, devino);
- pci_sun4v_msi_setvalid(pbm->devhandle, msi_num, HV_MSIVALID_INVALID);
- pci_sun4v_msiq_setvalid(pbm->devhandle, msiqid, HV_MSIQ_INVALID);
+ if (!virt_irq)
+ return -ENOMEM;
- free_msi(pbm, msi_num);
+ if (pci_sun4v_msiq_setstate(pbm->devhandle, msiqid, HV_MSIQSTATE_IDLE))
+ return -EINVAL;
+ if (pci_sun4v_msiq_setvalid(pbm->devhandle, msiqid, HV_MSIQ_VALID))
+ return -EINVAL;
- /* The sun4v_destroy_msi() will liberate the devino and thus the MSIQ
- * allocation.
- */
- sun4v_destroy_msi(virt_irq);
+ return virt_irq;
}
+static const struct sparc64_msiq_ops pci_sun4v_msiq_ops = {
+ .get_head = pci_sun4v_get_head,
+ .dequeue_msi = pci_sun4v_dequeue_msi,
+ .set_head = pci_sun4v_set_head,
+ .msi_setup = pci_sun4v_msi_setup,
+ .msi_teardown = pci_sun4v_msi_teardown,
+ .msiq_alloc = pci_sun4v_msiq_alloc,
+ .msiq_free = pci_sun4v_msiq_free,
+ .msiq_build_irq = pci_sun4v_msiq_build_irq,
+};
+
static void pci_sun4v_msi_init(struct pci_pbm_info *pbm)
{
- const u32 *val;
- int len;
-
- val = of_get_property(pbm->prom_node, "#msi-eqs", &len);
- if (!val || len != 4)
- goto no_msi;
- pbm->msiq_num = *val;
- if (pbm->msiq_num) {
- const struct msiq_prop {
- u32 first_msiq;
- u32 num_msiq;
- u32 first_devino;
- } *mqp;
- const struct msi_range_prop {
- u32 first_msi;
- u32 num_msi;
- } *mrng;
- const struct addr_range_prop {
- u32 msi32_high;
- u32 msi32_low;
- u32 msi32_len;
- u32 msi64_high;
- u32 msi64_low;
- u32 msi64_len;
- } *arng;
-
- val = of_get_property(pbm->prom_node, "msi-eq-size", &len);
- if (!val || len != 4)
- goto no_msi;
-
- pbm->msiq_ent_count = *val;
-
- mqp = of_get_property(pbm->prom_node,
- "msi-eq-to-devino", &len);
- if (!mqp || len != sizeof(struct msiq_prop))
- goto no_msi;
-
- pbm->msiq_first = mqp->first_msiq;
- pbm->msiq_first_devino = mqp->first_devino;
-
- val = of_get_property(pbm->prom_node, "#msi", &len);
- if (!val || len != 4)
- goto no_msi;
- pbm->msi_num = *val;
-
- mrng = of_get_property(pbm->prom_node, "msi-ranges", &len);
- if (!mrng || len != sizeof(struct msi_range_prop))
- goto no_msi;
- pbm->msi_first = mrng->first_msi;
-
- val = of_get_property(pbm->prom_node, "msi-data-mask", &len);
- if (!val || len != 4)
- goto no_msi;
- pbm->msi_data_mask = *val;
-
- val = of_get_property(pbm->prom_node, "msix-data-width", &len);
- if (!val || len != 4)
- goto no_msi;
- pbm->msix_data_width = *val;
-
- arng = of_get_property(pbm->prom_node, "msi-address-ranges",
- &len);
- if (!arng || len != sizeof(struct addr_range_prop))
- goto no_msi;
- pbm->msi32_start = ((u64)arng->msi32_high << 32) |
- (u64) arng->msi32_low;
- pbm->msi64_start = ((u64)arng->msi64_high << 32) |
- (u64) arng->msi64_low;
- pbm->msi32_len = arng->msi32_len;
- pbm->msi64_len = arng->msi64_len;
-
- if (msi_bitmap_alloc(pbm))
- goto no_msi;
-
- if (msi_queue_alloc(pbm)) {
- msi_bitmap_free(pbm);
- goto no_msi;
- }
-
- printk(KERN_INFO "%s: MSI Queue first[%u] num[%u] count[%u] "
- "devino[0x%x]\n",
- pbm->name,
- pbm->msiq_first, pbm->msiq_num,
- pbm->msiq_ent_count,
- pbm->msiq_first_devino);
- printk(KERN_INFO "%s: MSI first[%u] num[%u] mask[0x%x] "
- "width[%u]\n",
- pbm->name,
- pbm->msi_first, pbm->msi_num, pbm->msi_data_mask,
- pbm->msix_data_width);
- printk(KERN_INFO "%s: MSI addr32[0x%lx:0x%x] "
- "addr64[0x%lx:0x%x]\n",
- pbm->name,
- pbm->msi32_start, pbm->msi32_len,
- pbm->msi64_start, pbm->msi64_len);
- printk(KERN_INFO "%s: MSI queues at RA [%p]\n",
- pbm->name,
- pbm->msi_queues);
- }
- pbm->setup_msi_irq = pci_sun4v_setup_msi_irq;
- pbm->teardown_msi_irq = pci_sun4v_teardown_msi_irq;
-
- return;
-
-no_msi:
- pbm->msiq_num = 0;
- printk(KERN_INFO "%s: No MSI support.\n", pbm->name);
+ sparc64_pbm_msi_init(pbm, &pci_sun4v_msiq_ops);
}
#else /* CONFIG_PCI_MSI */
static void pci_sun4v_msi_init(struct pci_pbm_info *pbm)
@@ -1237,11 +1061,6 @@ void __init sun4v_pci_init(struct device_node *dp, char *model_name)
p->pbm_B.iommu = iommu;
- /* Like PSYCHO and SCHIZO we have a 2GB aligned area
- * for memory space.
- */
- pci_memspace_mask = 0x7fffffffUL;
-
pci_sun4v_pbm_init(p, dp, devhandle);
return;
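
pci_sun4v_dequeue_msi() above treats the hypervisor-reported queue head as a byte offset into the event queue, advancing it one entry at a time and wrapping at msiq_ent_count entries. A standalone sketch of that bookkeeping; the 64-byte entry size and 128-entry count are illustrative values:

	#include <stdio.h>

	#define ENT_SIZE  64UL	/* illustrative sizeof(struct pci_sun4v_msiq_entry) */
	#define ENT_COUNT 128UL	/* illustrative pbm->msiq_ent_count */

	static unsigned long advance_head(unsigned long head)
	{
		/* Mirrors the (*head) update in pci_sun4v_dequeue_msi(). */
		head += ENT_SIZE;
		if (head >= ENT_COUNT * ENT_SIZE)
			head = 0;
		return head;
	}

	int main(void)
	{
		unsigned long head = (ENT_COUNT - 1) * ENT_SIZE;

		printf("head 0x%lx -> 0x%lx (wraps past the last entry)\n",
		       head, advance_head(head));
		return 0;
	}
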
diff --git a/arch/sparc64/kernel/power.c b/arch/sparc64/kernel/power.c
index 881a09ee4c4c..850cdffdd69c 100644
--- a/arch/sparc64/kernel/power.c
+++ b/arch/sparc64/kernel/power.c
@@ -105,9 +105,11 @@ static struct of_device_id power_match[] = {
};
static struct of_platform_driver power_driver = {
- .name = "power",
.match_table = power_match,
.probe = power_probe,
+ .driver = {
+ .name = "power",
+ },
};
void __init power_init(void)
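
This hunk (and the matching clock_driver change in time.c below) moves the driver name out of the of_platform_driver wrapper and into the embedded generic device_driver. A mock of the new initializer shape, with simplified stand-in types for the kernel structures:

	#include <stdio.h>

	struct device_driver {
		const char *name;
	};

	struct of_platform_driver {	/* stand-in: only the relevant field */
		struct device_driver driver;	/* .name now lives in here */
	};

	static struct of_platform_driver power_driver = {
		.driver = {
			.name = "power",
		},
	};

	int main(void)
	{
		printf("driver name: %s\n", power_driver.driver.name);
		return 0;
	}
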
diff --git a/arch/sparc64/kernel/smp.c b/arch/sparc64/kernel/smp.c
index c73b7a48b036..407d74a8a542 100644
--- a/arch/sparc64/kernel/smp.c
+++ b/arch/sparc64/kernel/smp.c
@@ -52,14 +52,13 @@ int sparc64_multi_core __read_mostly;
cpumask_t cpu_possible_map __read_mostly = CPU_MASK_NONE;
cpumask_t cpu_online_map __read_mostly = CPU_MASK_NONE;
-cpumask_t cpu_sibling_map[NR_CPUS] __read_mostly =
- { [0 ... NR_CPUS-1] = CPU_MASK_NONE };
+DEFINE_PER_CPU(cpumask_t, cpu_sibling_map) = CPU_MASK_NONE;
cpumask_t cpu_core_map[NR_CPUS] __read_mostly =
{ [0 ... NR_CPUS-1] = CPU_MASK_NONE };
EXPORT_SYMBOL(cpu_possible_map);
EXPORT_SYMBOL(cpu_online_map);
-EXPORT_SYMBOL(cpu_sibling_map);
+EXPORT_PER_CPU_SYMBOL(cpu_sibling_map);
EXPORT_SYMBOL(cpu_core_map);
static cpumask_t smp_commenced_mask;
@@ -1261,16 +1260,16 @@ void __devinit smp_fill_in_sib_core_maps(void)
for_each_present_cpu(i) {
unsigned int j;
- cpus_clear(cpu_sibling_map[i]);
+ cpus_clear(per_cpu(cpu_sibling_map, i));
if (cpu_data(i).proc_id == -1) {
- cpu_set(i, cpu_sibling_map[i]);
+ cpu_set(i, per_cpu(cpu_sibling_map, i));
continue;
}
for_each_present_cpu(j) {
if (cpu_data(i).proc_id ==
cpu_data(j).proc_id)
- cpu_set(j, cpu_sibling_map[i]);
+ cpu_set(j, per_cpu(cpu_sibling_map, i));
}
}
}
@@ -1342,9 +1341,9 @@ int __cpu_disable(void)
cpu_clear(cpu, cpu_core_map[i]);
cpus_clear(cpu_core_map[cpu]);
- for_each_cpu_mask(i, cpu_sibling_map[cpu])
- cpu_clear(cpu, cpu_sibling_map[i]);
- cpus_clear(cpu_sibling_map[cpu]);
+ for_each_cpu_mask(i, per_cpu(cpu_sibling_map, cpu))
+ cpu_clear(cpu, per_cpu(cpu_sibling_map, i));
+ cpus_clear(per_cpu(cpu_sibling_map, cpu));
c = &cpu_data(cpu);
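
cpu_sibling_map becomes a per-cpu variable above, accessed through per_cpu(cpu_sibling_map, i) rather than by indexing an NR_CPUS-sized array. A user-space mock of the access pattern; the macros are toy stand-ins for the kernel's per-cpu machinery and the mask is a plain bitfield:

	#include <stdio.h>

	#define NR_CPUS 4
	typedef unsigned long cpumask_t;	/* toy mask, one bit per cpu */

	/* Toy stand-ins for DEFINE_PER_CPU()/per_cpu(). */
	#define DEFINE_PER_CPU(type, name) type __percpu_##name[NR_CPUS]
	#define per_cpu(name, cpu) (__percpu_##name[(cpu)])

	DEFINE_PER_CPU(cpumask_t, cpu_sibling_map);

	int main(void)
	{
		int i;

		for (i = 0; i < NR_CPUS; i++)
			per_cpu(cpu_sibling_map, i) |= 1UL << i;	/* cpu_set() */

		printf("cpu0 sibling mask: 0x%lx\n",
		       per_cpu(cpu_sibling_map, 0));
		return 0;
	}
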
diff --git a/arch/sparc64/kernel/sun4v_ivec.S b/arch/sparc64/kernel/sun4v_ivec.S
index 574bc248bca6..e2f8e1b4882a 100644
--- a/arch/sparc64/kernel/sun4v_ivec.S
+++ b/arch/sparc64/kernel/sun4v_ivec.S
@@ -96,19 +96,21 @@ sun4v_dev_mondo:
stxa %g2, [%g4] ASI_QUEUE
membar #Sync
- /* Get &__irq_work[smp_processor_id()] into %g1. */
- TRAP_LOAD_IRQ_WORK(%g1, %g4)
+ TRAP_LOAD_IRQ_WORK_PA(%g1, %g4)
- /* Get &ivector_table[IVEC] into %g4. */
- sethi %hi(ivector_table), %g4
- sllx %g3, 3, %g3
- or %g4, %lo(ivector_table), %g4
+ /* For VIRQs, cookie is encoded as ~bucket_phys_addr */
+ brlz,pt %g3, 1f
+ xnor %g3, %g0, %g4
+
+ /* Get __pa(&ivector_table[IVEC]) into %g4. */
+ sethi %hi(ivector_table_pa), %g4
+ ldx [%g4 + %lo(ivector_table_pa)], %g4
+ sllx %g3, 4, %g3
add %g4, %g3, %g4
- /* Insert ivector_table[] entry into __irq_work[] queue. */
- lduw [%g1], %g2 /* g2 = irq_work(cpu) */
- stw %g2, [%g4 + 0x00] /* bucket->irq_chain = g2 */
- stw %g4, [%g1] /* irq_work(cpu) = bucket */
+1: ldx [%g1], %g2
+ stxa %g2, [%g4] ASI_PHYS_USE_EC
+ stx %g4, [%g1]
/* Signal the interrupt by setting (1 << pil) in %softint. */
wr %g0, 1 << PIL_DEVICE_IRQ, %set_softint
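
In the sun4v_dev_mondo path above, a virtual-IRQ cookie is stored as the bitwise complement of the bucket's physical address: the complement of a physical address is negative when read as a signed value, so brlz distinguishes cookies from plain ivector indices, and "xnor %g3, %g0, %g4" (xnor with zero is bitwise NOT) recovers the address. A C sketch of the encode/decode, using an arbitrary example address:

	#include <stdio.h>

	int main(void)
	{
		unsigned long bucket_pa = 0x000000407f123440UL;	/* example RA */
		long cookie = ~bucket_pa;	/* encode: always negative */

		if (cookie < 0)			/* the brlz test */
			printf("decoded bucket pa: 0x%lx\n",
			       ~(unsigned long)cookie);	/* xnor with zero */
		return 0;
	}
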
diff --git a/arch/sparc64/kernel/sys_sparc.c b/arch/sparc64/kernel/sys_sparc.c
index d108eeb0734f..0d5c50264945 100644
--- a/arch/sparc64/kernel/sys_sparc.c
+++ b/arch/sparc64/kernel/sys_sparc.c
@@ -436,7 +436,7 @@ out:
asmlinkage long sys_ipc(unsigned int call, int first, unsigned long second,
unsigned long third, void __user *ptr, long fifth)
{
- int err;
+ long err;
/* No need for backward compatibility. We can start fresh... */
if (call <= SEMCTL) {
@@ -453,16 +453,9 @@ asmlinkage long sys_ipc(unsigned int call, int first, unsigned long second,
err = sys_semget(first, (int)second, (int)third);
goto out;
case SEMCTL: {
- union semun fourth;
- err = -EINVAL;
- if (!ptr)
- goto out;
- err = -EFAULT;
- if (get_user(fourth.__pad,
- (void __user * __user *) ptr))
- goto out;
- err = sys_semctl(first, (int)second | IPC_64,
- (int)third, fourth);
+ err = sys_semctl(first, third,
+ (int)second | IPC_64,
+ (union semun) ptr);
goto out;
}
default:
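
The SEMCTL case above now passes the user pointer straight through as the union semun argument instead of fetching it with get_user() first (sparc's sys_semctl also takes the second and third arguments swapped, which the new call reflects). A toy sketch of the pass-through, with stand-in types; the cast-to-union in the kernel relies on a GCC extension, written here as a C99 compound literal:

	#include <stdio.h>

	union semun {		/* stand-in for the uapi union */
		int val;
		void *__pad;
	};

	static long toy_semctl(int semid, int semnum, int cmd, union semun arg)
	{
		printf("semid=%d semnum=%d cmd=%d arg=%p\n",
		       semid, semnum, cmd, arg.__pad);
		return 0;
	}

	int main(void)
	{
		int sem_value = 42;
		void *ptr = &sem_value;	/* what userspace handed to sys_ipc() */

		/* Old code: get_user(fourth.__pad, ptr) - one extra dereference.
		 * New code: the pointer itself becomes the union argument.
		 */
		return (int)toy_semctl(1, 0, 0, (union semun){ .__pad = ptr });
	}
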
diff --git a/arch/sparc64/kernel/time.c b/arch/sparc64/kernel/time.c
index 69cad1b653c1..cd8c740cba1d 100644
--- a/arch/sparc64/kernel/time.c
+++ b/arch/sparc64/kernel/time.c
@@ -764,9 +764,11 @@ static struct of_device_id clock_match[] = {
};
static struct of_platform_driver clock_driver = {
- .name = "clock",
.match_table = clock_match,
.probe = clock_probe,
+ .driver = {
+ .name = "clock",
+ },
};
static int __init clock_init(void)
diff --git a/arch/sparc64/kernel/traps.c b/arch/sparc64/kernel/traps.c
index 6ef42b8e53d8..34573a55b6e5 100644
--- a/arch/sparc64/kernel/traps.c
+++ b/arch/sparc64/kernel/traps.c
@@ -2569,8 +2569,8 @@ void __init trap_init(void)
offsetof(struct trap_per_cpu, tsb_huge)) ||
(TRAP_PER_CPU_TSB_HUGE_TEMP !=
offsetof(struct trap_per_cpu, tsb_huge_temp)) ||
- (TRAP_PER_CPU_IRQ_WORKLIST !=
- offsetof(struct trap_per_cpu, irq_worklist)) ||
+ (TRAP_PER_CPU_IRQ_WORKLIST_PA !=
+ offsetof(struct trap_per_cpu, irq_worklist_pa)) ||
(TRAP_PER_CPU_CPU_MONDO_QMASK !=
offsetof(struct trap_per_cpu, cpu_mondo_qmask)) ||
(TRAP_PER_CPU_DEV_MONDO_QMASK !=
diff --git a/arch/sparc64/kernel/us2e_cpufreq.c b/arch/sparc64/kernel/us2e_cpufreq.c
index 1f83fe6a82d6..791c15138f3a 100644
--- a/arch/sparc64/kernel/us2e_cpufreq.c
+++ b/arch/sparc64/kernel/us2e_cpufreq.c
@@ -326,7 +326,6 @@ static int __init us2e_freq_cpu_init(struct cpufreq_policy *policy)
table[2].index = 5;
table[3].frequency = CPUFREQ_TABLE_END;
- policy->governor = CPUFREQ_DEFAULT_GOVERNOR;
policy->cpuinfo.transition_latency = 0;
policy->cur = clock_tick;
diff --git a/arch/sparc64/kernel/vmlinux.lds.S b/arch/sparc64/kernel/vmlinux.lds.S
index b982fa3dd748..9fcd503bc04a 100644
--- a/arch/sparc64/kernel/vmlinux.lds.S
+++ b/arch/sparc64/kernel/vmlinux.lds.S
@@ -10,105 +10,138 @@ ENTRY(_start)
jiffies = jiffies_64;
SECTIONS
{
- swapper_low_pmd_dir = 0x0000000000402000;
- . = 0x4000;
- .text 0x0000000000404000 :
- {
- _text = .;
- TEXT_TEXT
- SCHED_TEXT
- LOCK_TEXT
- KPROBES_TEXT
- *(.gnu.warning)
- } =0
- _etext = .;
- PROVIDE (etext = .);
+ swapper_low_pmd_dir = 0x0000000000402000;
+ . = 0x4000;
+ .text 0x0000000000404000 : {
+ _text = .;
+ TEXT_TEXT
+ SCHED_TEXT
+ LOCK_TEXT
+ KPROBES_TEXT
+ *(.gnu.warning)
+ } = 0
+ _etext = .;
+ PROVIDE (etext = .);
- RO_DATA(PAGE_SIZE)
+ RO_DATA(PAGE_SIZE)
+ .data : {
+ DATA_DATA
+ CONSTRUCTORS
+ }
+ .data1 : {
+ *(.data1)
+ }
+ . = ALIGN(64);
+ .data.cacheline_aligned : {
+ *(.data.cacheline_aligned)
+ }
+ . = ALIGN(64);
+ .data.read_mostly : {
+ *(.data.read_mostly)
+ }
+ _edata = .;
+ PROVIDE (edata = .);
+ .fixup : {
+ *(.fixup)
+ }
+ . = ALIGN(16);
+ __ex_table : {
+ __start___ex_table = .;
+ *(__ex_table)
+ __stop___ex_table = .;
+ }
+ NOTES
- .data :
- {
- DATA_DATA
- CONSTRUCTORS
- }
- .data1 : { *(.data1) }
- . = ALIGN(64);
- .data.cacheline_aligned : { *(.data.cacheline_aligned) }
- . = ALIGN(64);
- .data.read_mostly : { *(.data.read_mostly) }
- _edata = .;
- PROVIDE (edata = .);
- .fixup : { *(.fixup) }
+ . = ALIGN(PAGE_SIZE);
+ .init.text : {
+ __init_begin = .;
+ _sinittext = .;
+ *(.init.text)
+ _einittext = .;
+ }
+ .init.data : {
+ *(.init.data)
+ }
+ . = ALIGN(16);
+ .init.setup : {
+ __setup_start = .;
+ *(.init.setup)
+ __setup_end = .;
+ }
+ .initcall.init : {
+ __initcall_start = .;
+ INITCALLS
+ __initcall_end = .;
+ }
+ .con_initcall.init : {
+ __con_initcall_start = .;
+ *(.con_initcall.init)
+ __con_initcall_end = .;
+ }
+ SECURITY_INIT
- . = ALIGN(16);
- __start___ex_table = .;
- __ex_table : { *(__ex_table) }
- __stop___ex_table = .;
+ . = ALIGN(4);
+ .tsb_ldquad_phys_patch : {
+ __tsb_ldquad_phys_patch = .;
+ *(.tsb_ldquad_phys_patch)
+ __tsb_ldquad_phys_patch_end = .;
+ }
- NOTES
+ .tsb_phys_patch : {
+ __tsb_phys_patch = .;
+ *(.tsb_phys_patch)
+ __tsb_phys_patch_end = .;
+ }
- . = ALIGN(PAGE_SIZE);
- __init_begin = .;
- .init.text : {
- _sinittext = .;
- *(.init.text)
- _einittext = .;
- }
- .init.data : { *(.init.data) }
- . = ALIGN(16);
- __setup_start = .;
- .init.setup : { *(.init.setup) }
- __setup_end = .;
- __initcall_start = .;
- .initcall.init : {
- INITCALLS
- }
- __initcall_end = .;
- __con_initcall_start = .;
- .con_initcall.init : { *(.con_initcall.init) }
- __con_initcall_end = .;
- SECURITY_INIT
- . = ALIGN(4);
- __tsb_ldquad_phys_patch = .;
- .tsb_ldquad_phys_patch : { *(.tsb_ldquad_phys_patch) }
- __tsb_ldquad_phys_patch_end = .;
- __tsb_phys_patch = .;
- .tsb_phys_patch : { *(.tsb_phys_patch) }
- __tsb_phys_patch_end = .;
- __cpuid_patch = .;
- .cpuid_patch : { *(.cpuid_patch) }
- __cpuid_patch_end = .;
- __sun4v_1insn_patch = .;
- .sun4v_1insn_patch : { *(.sun4v_1insn_patch) }
- __sun4v_1insn_patch_end = .;
- __sun4v_2insn_patch = .;
- .sun4v_2insn_patch : { *(.sun4v_2insn_patch) }
- __sun4v_2insn_patch_end = .;
+ .cpuid_patch : {
+ __cpuid_patch = .;
+ *(.cpuid_patch)
+ __cpuid_patch_end = .;
+ }
+
+ .sun4v_1insn_patch : {
+ __sun4v_1insn_patch = .;
+ *(.sun4v_1insn_patch)
+ __sun4v_1insn_patch_end = .;
+ }
+ .sun4v_2insn_patch : {
+ __sun4v_2insn_patch = .;
+ *(.sun4v_2insn_patch)
+ __sun4v_2insn_patch_end = .;
+ }
#ifdef CONFIG_BLK_DEV_INITRD
- . = ALIGN(PAGE_SIZE);
- __initramfs_start = .;
- .init.ramfs : { *(.init.ramfs) }
- __initramfs_end = .;
+ . = ALIGN(PAGE_SIZE);
+ .init.ramfs : {
+ __initramfs_start = .;
+ *(.init.ramfs)
+ __initramfs_end = .;
+ }
#endif
- PERCPU(PAGE_SIZE)
+ PERCPU(PAGE_SIZE)
- . = ALIGN(PAGE_SIZE);
- __init_end = .;
- __bss_start = .;
- .sbss : { *(.sbss) *(.scommon) }
- .bss :
- {
- *(.dynbss)
- *(.bss)
- *(COMMON)
- }
- _end = . ;
- PROVIDE (end = .);
- /DISCARD/ : { *(.exit.text) *(.exit.data) *(.exitcall.exit) }
+ . = ALIGN(PAGE_SIZE);
+ __init_end = .;
+ __bss_start = .;
+ .sbss : {
+ *(.sbss)
+ *(.scommon)
+ }
+ .bss : {
+ *(.dynbss)
+ *(.bss)
+ *(COMMON)
+ }
+ _end = . ;
+ PROVIDE (end = .);
- STABS_DEBUG
+ /DISCARD/ : {
+ *(.exit.text)
+ *(.exit.data)
+ *(.exitcall.exit)
+ }
- DWARF_DEBUG
+ STABS_DEBUG
+ DWARF_DEBUG
}
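
The linker-script rework above moves each __start/__end symbol pair inside its output section, which matches how the spans are consumed from C: the bounds are declared as extern arrays and walked. A mock that runs standalone by pointing the bounds at a local array; the two 32-bit words follow the kernel's sun4v_1insn_patch_entry layout, but the addr/insn values are invented:

	#include <stdio.h>

	struct sun4v_1insn_patch_entry {
		unsigned int addr;	/* instruction location */
		unsigned int insn;	/* replacement instruction */
	};

	/* Mocked bounds; in the kernel these are the __sun4v_1insn_patch
	 * and __sun4v_1insn_patch_end symbols emitted by the section above.
	 */
	static struct sun4v_1insn_patch_entry patches[] = {
		{ 0x404000, 0x01000000 },	/* invented addr/insn pairs */
		{ 0x404010, 0x01000000 },
	};
	static struct sun4v_1insn_patch_entry *start = patches;
	static struct sun4v_1insn_patch_entry *end = patches + 2;

	int main(void)
	{
		struct sun4v_1insn_patch_entry *p;

		for (p = start; p < end; p++)
			printf("would patch insn at 0x%x\n", p->addr);
		return 0;
	}
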
diff --git a/arch/sparc64/lib/xor.S b/arch/sparc64/lib/xor.S
index a79c8888170d..f44f58f40234 100644
--- a/arch/sparc64/lib/xor.S
+++ b/arch/sparc64/lib/xor.S
@@ -491,12 +491,12 @@ xor_niagara_4: /* %o0=bytes, %o1=dest, %o2=src1, %o3=src2, %o4=src3 */
ldda [%i1 + 0x10] %asi, %i2 /* %i2/%i3 = src1 + 0x10 */
xor %g2, %i4, %g2
xor %g3, %i5, %g3
- ldda [%i7 + 0x10] %asi, %i4 /* %i4/%i5 = src2 + 0x10 */
+ ldda [%l7 + 0x10] %asi, %i4 /* %i4/%i5 = src2 + 0x10 */
xor %l0, %g2, %l0
xor %l1, %g3, %l1
stxa %l0, [%i0 + 0x00] %asi
stxa %l1, [%i0 + 0x08] %asi
- ldda [%i6 + 0x10] %asi, %g2 /* %g2/%g3 = src3 + 0x10 */
+ ldda [%l6 + 0x10] %asi, %g2 /* %g2/%g3 = src3 + 0x10 */
ldda [%i0 + 0x10] %asi, %l0 /* %l0/%l1 = dest + 0x10 */
xor %i4, %i2, %i4
@@ -504,12 +504,12 @@ xor_niagara_4: /* %o0=bytes, %o1=dest, %o2=src1, %o3=src2, %o4=src3 */
ldda [%i1 + 0x20] %asi, %i2 /* %i2/%i3 = src1 + 0x20 */
xor %g2, %i4, %g2
xor %g3, %i5, %g3
- ldda [%i7 + 0x20] %asi, %i4 /* %i4/%i5 = src2 + 0x20 */
+ ldda [%l7 + 0x20] %asi, %i4 /* %i4/%i5 = src2 + 0x20 */
xor %l0, %g2, %l0
xor %l1, %g3, %l1
stxa %l0, [%i0 + 0x10] %asi
stxa %l1, [%i0 + 0x18] %asi
- ldda [%i6 + 0x20] %asi, %g2 /* %g2/%g3 = src3 + 0x20 */
+ ldda [%l6 + 0x20] %asi, %g2 /* %g2/%g3 = src3 + 0x20 */
ldda [%i0 + 0x20] %asi, %l0 /* %l0/%l1 = dest + 0x20 */
xor %i4, %i2, %i4
@@ -517,12 +517,12 @@ xor_niagara_4: /* %o0=bytes, %o1=dest, %o2=src1, %o3=src2, %o4=src3 */
ldda [%i1 + 0x30] %asi, %i2 /* %i2/%i3 = src1 + 0x30 */
xor %g2, %i4, %g2
xor %g3, %i5, %g3
- ldda [%i7 + 0x30] %asi, %i4 /* %i4/%i5 = src2 + 0x30 */
+ ldda [%l7 + 0x30] %asi, %i4 /* %i4/%i5 = src2 + 0x30 */
xor %l0, %g2, %l0
xor %l1, %g3, %l1
stxa %l0, [%i0 + 0x20] %asi
stxa %l1, [%i0 + 0x28] %asi
- ldda [%i6 + 0x30] %asi, %g2 /* %g2/%g3 = src3 + 0x30 */
+ ldda [%l6 + 0x30] %asi, %g2 /* %g2/%g3 = src3 + 0x30 */
ldda [%i0 + 0x30] %asi, %l0 /* %l0/%l1 = dest + 0x30 */
prefetch [%i1 + 0x40], #one_read
diff --git a/arch/sparc64/mm/fault.c b/arch/sparc64/mm/fault.c
index 9f7740eee8d2..e2027f27c0fe 100644
--- a/arch/sparc64/mm/fault.c
+++ b/arch/sparc64/mm/fault.c
@@ -463,7 +463,7 @@ out_of_memory:
up_read(&mm->mmap_sem);
printk("VM: killing process %s\n", current->comm);
if (!(regs->tstate & TSTATE_PRIV))
- do_exit(SIGKILL);
+ do_group_exit(SIGKILL);
goto handle_kernel_fault;
intr_or_no_mm:
diff --git a/arch/sparc64/mm/init.c b/arch/sparc64/mm/init.c
index 3010227fe243..100c4456ed1e 100644
--- a/arch/sparc64/mm/init.c
+++ b/arch/sparc64/mm/init.c
@@ -631,7 +631,6 @@ void prom_world(int enter)
__asm__ __volatile__("flushw");
}
-#ifdef DCACHE_ALIASING_POSSIBLE
void __flush_dcache_range(unsigned long start, unsigned long end)
{
unsigned long va;
@@ -655,7 +654,6 @@ void __flush_dcache_range(unsigned long start, unsigned long end)
"i" (ASI_DCACHE_INVALIDATE));
}
}
-#endif /* DCACHE_ALIASING_POSSIBLE */
/* get_new_mmu_context() uses "cache + 1". */
DEFINE_SPINLOCK(ctx_alloc_lock);
@@ -1647,6 +1645,58 @@ EXPORT_SYMBOL(_PAGE_E);
unsigned long _PAGE_CACHE __read_mostly;
EXPORT_SYMBOL(_PAGE_CACHE);
+#ifdef CONFIG_SPARSEMEM_VMEMMAP
+
+#define VMEMMAP_CHUNK_SHIFT 22
+#define VMEMMAP_CHUNK (1UL << VMEMMAP_CHUNK_SHIFT)
+#define VMEMMAP_CHUNK_MASK ~(VMEMMAP_CHUNK - 1UL)
+#define VMEMMAP_ALIGN(x) (((x)+VMEMMAP_CHUNK-1UL)&VMEMMAP_CHUNK_MASK)
+
+#define VMEMMAP_SIZE ((((1UL << MAX_PHYSADDR_BITS) >> PAGE_SHIFT) * \
+ sizeof(struct page *)) >> VMEMMAP_CHUNK_SHIFT)
+unsigned long vmemmap_table[VMEMMAP_SIZE];
+
+int __meminit vmemmap_populate(struct page *start, unsigned long nr, int node)
+{
+ unsigned long vstart = (unsigned long) start;
+ unsigned long vend = (unsigned long) (start + nr);
+ unsigned long phys_start = (vstart - VMEMMAP_BASE);
+ unsigned long phys_end = (vend - VMEMMAP_BASE);
+ unsigned long addr = phys_start & VMEMMAP_CHUNK_MASK;
+ unsigned long end = VMEMMAP_ALIGN(phys_end);
+ unsigned long pte_base;
+
+ pte_base = (_PAGE_VALID | _PAGE_SZ4MB_4U |
+ _PAGE_CP_4U | _PAGE_CV_4U |
+ _PAGE_P_4U | _PAGE_W_4U);
+ if (tlb_type == hypervisor)
+ pte_base = (_PAGE_VALID | _PAGE_SZ4MB_4V |
+ _PAGE_CP_4V | _PAGE_CV_4V |
+ _PAGE_P_4V | _PAGE_W_4V);
+
+ for (; addr < end; addr += VMEMMAP_CHUNK) {
+ unsigned long *vmem_pp =
+ vmemmap_table + (addr >> VMEMMAP_CHUNK_SHIFT);
+ void *block;
+
+ if (!(*vmem_pp & _PAGE_VALID)) {
+ block = vmemmap_alloc_block(1UL << 22, node);
+ if (!block)
+ return -ENOMEM;
+
+ *vmem_pp = pte_base | __pa(block);
+
+ printk(KERN_INFO "[%p-%p] page_structs=%lu "
+ "node=%d entry=%lu/%lu\n", start, block, nr,
+ node,
+ addr >> VMEMMAP_CHUNK_SHIFT,
+ VMEMMAP_SIZE >> VMEMMAP_CHUNK_SHIFT);
+ }
+ }
+ return 0;
+}
+#endif /* CONFIG_SPARSEMEM_VMEMMAP */
+
static void prot_init_common(unsigned long page_none,
unsigned long page_shared,
unsigned long page_copy,
@@ -1911,9 +1961,4 @@ void online_page(struct page *page)
num_physpages++;
}
-int remove_memory(u64 start, u64 size)
-{
- return -EINVAL;
-}
-
#endif /* CONFIG_MEMORY_HOTPLUG */
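
The vmemmap_populate() addition above backs the virtual memmap in 4MB (1 << 22) chunks, one vmemmap_table[] slot per chunk, so a byte offset into the memmap selects slot offset >> VMEMMAP_CHUNK_SHIFT. A standalone sketch of the chunk arithmetic with an arbitrary example range:

	#include <stdio.h>

	#define VMEMMAP_CHUNK_SHIFT 22
	#define VMEMMAP_CHUNK       (1UL << VMEMMAP_CHUNK_SHIFT)
	#define VMEMMAP_CHUNK_MASK  (~(VMEMMAP_CHUNK - 1UL))
	#define VMEMMAP_ALIGN(x)    (((x) + VMEMMAP_CHUNK - 1UL) & VMEMMAP_CHUNK_MASK)

	int main(void)
	{
		unsigned long phys_start = 0x12345678UL;	/* example offsets */
		unsigned long phys_end   = phys_start + 0x400000UL;
		unsigned long addr = phys_start & VMEMMAP_CHUNK_MASK;
		unsigned long end  = VMEMMAP_ALIGN(phys_end);

		for (; addr < end; addr += VMEMMAP_CHUNK)
			printf("populate vmemmap_table[%lu]\n",
			       addr >> VMEMMAP_CHUNK_SHIFT);
		return 0;
	}
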