Diffstat (limited to 'arch/ia64/sn/kernel/irq.c')
-rw-r--r--  arch/ia64/sn/kernel/irq.c  255
1 file changed, 135 insertions, 120 deletions
diff --git a/arch/ia64/sn/kernel/irq.c b/arch/ia64/sn/kernel/irq.c
index 0f4e8138658..84d276a14ec 100644
--- a/arch/ia64/sn/kernel/irq.c
+++ b/arch/ia64/sn/kernel/irq.c
@@ -9,13 +9,13 @@
*/
#include <linux/irq.h>
-#include <asm/sn/intr.h>
+#include <linux/spinlock.h>
#include <asm/sn/addrs.h>
#include <asm/sn/arch.h>
-#include "xtalk/xwidgetdev.h"
+#include <asm/sn/intr.h>
+#include <asm/sn/pcibr_provider.h>
#include <asm/sn/pcibus_provider_defs.h>
#include <asm/sn/pcidev.h>
-#include "pci/pcibr_provider.h"
#include <asm/sn/shub_mmr.h>
#include <asm/sn/sn_sal.h>
@@ -25,7 +25,8 @@ static void unregister_intr_pda(struct sn_irq_info *sn_irq_info);
extern int sn_force_interrupt_flag;
extern int sn_ioif_inited;
-struct sn_irq_info **sn_irq;
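+/* Per-irq lists of sn_irq_info; writers take sn_irq_info_lock, readers use RCU */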
+static struct list_head **sn_irq_lh;
+static spinlock_t sn_irq_info_lock = SPIN_LOCK_UNLOCKED; /* non-IRQ lock */
static inline uint64_t sn_intr_alloc(nasid_t local_nasid, int local_widget,
u64 sn_irq_info,
@@ -101,7 +102,7 @@ static void sn_end_irq(unsigned int irq)
nasid = get_nasid();
event_occurred = HUB_L((uint64_t *) GLOBAL_MMR_ADDR
(nasid, SH_EVENT_OCCURRED));
- /* If the UART bit is set here, we may have received an
+ /* If the UART bit is set here, we may have received an
* interrupt from the UART that the driver missed. To
* make sure, we IPI ourselves to force us to look again.
*/
@@ -115,82 +116,84 @@ static void sn_end_irq(unsigned int irq)
force_interrupt(irq);
}
+static void sn_irq_info_free(struct rcu_head *head);
+
static void sn_set_affinity_irq(unsigned int irq, cpumask_t mask)
{
- struct sn_irq_info *sn_irq_info = sn_irq[irq];
- struct sn_irq_info *tmp_sn_irq_info;
+ struct sn_irq_info *sn_irq_info, *sn_irq_info_safe;
int cpuid, cpuphys;
- nasid_t t_nasid; /* nasid to target */
- int t_slice; /* slice to target */
-
- /* allocate a temp sn_irq_info struct to get new target info */
- tmp_sn_irq_info = kmalloc(sizeof(*tmp_sn_irq_info), GFP_KERNEL);
- if (!tmp_sn_irq_info)
- return;
cpuid = first_cpu(mask);
cpuphys = cpu_physical_id(cpuid);
- t_nasid = cpuid_to_nasid(cpuid);
- t_slice = cpuid_to_slice(cpuid);
- while (sn_irq_info) {
- int status;
- int local_widget;
- uint64_t bridge = (uint64_t) sn_irq_info->irq_bridge;
- nasid_t local_nasid = NASID_GET(bridge);
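+	/*
+	 * Re-target every interrupt registered on this irq: copy each
+	 * entry, move its PROM registration to the new cpu, then swap
+	 * the copy into the RCU-protected list.
+	 */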
+ list_for_each_entry_safe(sn_irq_info, sn_irq_info_safe,
+ sn_irq_lh[irq], list) {
+ uint64_t bridge;
+ int local_widget, status;
+ nasid_t local_nasid;
+ struct sn_irq_info *new_irq_info;
+
+ new_irq_info = kmalloc(sizeof(struct sn_irq_info), GFP_ATOMIC);
+ if (new_irq_info == NULL)
+ break;
+ memcpy(new_irq_info, sn_irq_info, sizeof(struct sn_irq_info));
+
+ bridge = (uint64_t) new_irq_info->irq_bridge;
+ if (!bridge) {
+ kfree(new_irq_info);
+ break; /* irq is not a device interrupt */
+ }
- if (!bridge)
- break; /* irq is not a device interrupt */
+ local_nasid = NASID_GET(bridge);
if (local_nasid & 1)
local_widget = TIO_SWIN_WIDGETNUM(bridge);
else
local_widget = SWIN_WIDGETNUM(bridge);
- /* Free the old PROM sn_irq_info structure */
- sn_intr_free(local_nasid, local_widget, sn_irq_info);
+	/* Free the old PROM registration; new_irq_info is a copy of the old entry */
+ sn_intr_free(local_nasid, local_widget, new_irq_info);
+	/* Update the kernel's new_irq_info with the new target info */
+ unregister_intr_pda(new_irq_info);
- /* allocate a new PROM sn_irq_info struct */
+	/* Allocate a new PROM registration for the new target */
status = sn_intr_alloc(local_nasid, local_widget,
- __pa(tmp_sn_irq_info), irq, t_nasid,
- t_slice);
-
- if (status == 0) {
- /* Update kernels sn_irq_info with new target info */
- unregister_intr_pda(sn_irq_info);
- sn_irq_info->irq_cpuid = cpuid;
- sn_irq_info->irq_nasid = t_nasid;
- sn_irq_info->irq_slice = t_slice;
- sn_irq_info->irq_xtalkaddr =
- tmp_sn_irq_info->irq_xtalkaddr;
- sn_irq_info->irq_cookie = tmp_sn_irq_info->irq_cookie;
- register_intr_pda(sn_irq_info);
-
- if (IS_PCI_BRIDGE_ASIC(sn_irq_info->irq_bridge_type)) {
- pcibr_change_devices_irq(sn_irq_info);
- }
+ __pa(new_irq_info), irq,
+ cpuid_to_nasid(cpuid),
+ cpuid_to_slice(cpuid));
+
+ /* SAL call failed */
+ if (status) {
+ kfree(new_irq_info);
+ break;
+ }
+
+ new_irq_info->irq_cpuid = cpuid;
+ register_intr_pda(new_irq_info);
+
+ if (IS_PCI_BRIDGE_ASIC(new_irq_info->irq_bridge_type))
+ pcibr_change_devices_irq(new_irq_info);
- sn_irq_info = sn_irq_info->irq_next;
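+		/*
+		 * Swap the new entry into the per-irq list under the writer
+		 * lock; readers walk the list under rcu_read_lock(), so the
+		 * old entry is freed via call_rcu().
+		 */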
+ spin_lock(&sn_irq_info_lock);
+ list_replace_rcu(&sn_irq_info->list, &new_irq_info->list);
+ spin_unlock(&sn_irq_info_lock);
+ call_rcu(&sn_irq_info->rcu, sn_irq_info_free);
#ifdef CONFIG_SMP
- set_irq_affinity_info((irq & 0xff), cpuphys, 0);
+ set_irq_affinity_info((irq & 0xff), cpuphys, 0);
#endif
- } else {
- break; /* snp_affinity failed the intr_alloc */
- }
}
- kfree(tmp_sn_irq_info);
}
struct hw_interrupt_type irq_type_sn = {
- "SN hub",
- sn_startup_irq,
- sn_shutdown_irq,
- sn_enable_irq,
- sn_disable_irq,
- sn_ack_irq,
- sn_end_irq,
- sn_set_affinity_irq
+ .typename = "SN hub",
+ .startup = sn_startup_irq,
+ .shutdown = sn_shutdown_irq,
+ .enable = sn_enable_irq,
+ .disable = sn_disable_irq,
+ .ack = sn_ack_irq,
+ .end = sn_end_irq,
+ .set_affinity = sn_set_affinity_irq
};
unsigned int sn_local_vector_to_irq(u8 vector)
@@ -231,19 +234,18 @@ static void unregister_intr_pda(struct sn_irq_info *sn_irq_info)
struct sn_irq_info *tmp_irq_info;
int i, foundmatch;
+ rcu_read_lock();
if (pdacpu(cpu)->sn_last_irq == irq) {
foundmatch = 0;
- for (i = pdacpu(cpu)->sn_last_irq - 1; i; i--) {
- tmp_irq_info = sn_irq[i];
- while (tmp_irq_info) {
+ for (i = pdacpu(cpu)->sn_last_irq - 1;
+ i && !foundmatch; i--) {
+ list_for_each_entry_rcu(tmp_irq_info,
+ sn_irq_lh[i],
+ list) {
if (tmp_irq_info->irq_cpuid == cpu) {
- foundmatch++;
+ foundmatch = 1;
break;
}
- tmp_irq_info = tmp_irq_info->irq_next;
- }
- if (foundmatch) {
- break;
}
}
pdacpu(cpu)->sn_last_irq = i;
@@ -251,60 +253,27 @@ static void unregister_intr_pda(struct sn_irq_info *sn_irq_info)
if (pdacpu(cpu)->sn_first_irq == irq) {
foundmatch = 0;
- for (i = pdacpu(cpu)->sn_first_irq + 1; i < NR_IRQS; i++) {
- tmp_irq_info = sn_irq[i];
- while (tmp_irq_info) {
+ for (i = pdacpu(cpu)->sn_first_irq + 1;
+ i < NR_IRQS && !foundmatch; i++) {
+ list_for_each_entry_rcu(tmp_irq_info,
+ sn_irq_lh[i],
+ list) {
if (tmp_irq_info->irq_cpuid == cpu) {
- foundmatch++;
+ foundmatch = 1;
break;
}
- tmp_irq_info = tmp_irq_info->irq_next;
- }
- if (foundmatch) {
- break;
}
}
pdacpu(cpu)->sn_first_irq = ((i == NR_IRQS) ? 0 : i);
}
+ rcu_read_unlock();
}
-struct sn_irq_info *sn_irq_alloc(nasid_t local_nasid, int local_widget, int irq,
- nasid_t nasid, int slice)
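+/* RCU callback: free an sn_irq_info entry once all readers have dropped it */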
+static void sn_irq_info_free(struct rcu_head *head)
{
struct sn_irq_info *sn_irq_info;
- int status;
-
- sn_irq_info = kmalloc(sizeof(*sn_irq_info), GFP_KERNEL);
- if (sn_irq_info == NULL)
- return NULL;
-
- memset(sn_irq_info, 0x0, sizeof(*sn_irq_info));
-
- status =
- sn_intr_alloc(local_nasid, local_widget, __pa(sn_irq_info), irq,
- nasid, slice);
-
- if (status) {
- kfree(sn_irq_info);
- return NULL;
- } else {
- return sn_irq_info;
- }
-}
-
-void sn_irq_free(struct sn_irq_info *sn_irq_info)
-{
- uint64_t bridge = (uint64_t) sn_irq_info->irq_bridge;
- nasid_t local_nasid = NASID_GET(bridge);
- int local_widget;
-
- if (local_nasid & 1) /* tio check */
- local_widget = TIO_SWIN_WIDGETNUM(bridge);
- else
- local_widget = SWIN_WIDGETNUM(bridge);
-
- sn_intr_free(local_nasid, local_widget, sn_irq_info);
+ sn_irq_info = container_of(head, struct sn_irq_info, rcu);
kfree(sn_irq_info);
}
@@ -314,30 +283,54 @@ void sn_irq_fixup(struct pci_dev *pci_dev, struct sn_irq_info *sn_irq_info)
int slice = sn_irq_info->irq_slice;
int cpu = nasid_slice_to_cpuid(nasid, slice);
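+	/* Hold a device reference for this entry; dropped in sn_irq_unfixup() */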
+ pci_dev_get(pci_dev);
sn_irq_info->irq_cpuid = cpu;
sn_irq_info->irq_pciioinfo = SN_PCIDEV_INFO(pci_dev);
/* link it into the sn_irq[irq] list */
- sn_irq_info->irq_next = sn_irq[sn_irq_info->irq_irq];
- sn_irq[sn_irq_info->irq_irq] = sn_irq_info;
+ spin_lock(&sn_irq_info_lock);
+ list_add_rcu(&sn_irq_info->list, sn_irq_lh[sn_irq_info->irq_irq]);
+ spin_unlock(&sn_irq_info_lock);
(void)register_intr_pda(sn_irq_info);
}
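+/* Undo sn_irq_fixup(): unhook the device's interrupt entry and drop its reference */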
+void sn_irq_unfixup(struct pci_dev *pci_dev)
+{
+ struct sn_irq_info *sn_irq_info;
+
+	/* Only clean up IRQ state if this device has a host bus context */
+ if (!SN_PCIDEV_BUSSOFT(pci_dev))
+ return;
+
+ sn_irq_info = SN_PCIDEV_INFO(pci_dev)->pdi_sn_irq_info;
+ if (!sn_irq_info || !sn_irq_info->irq_irq) {
+ kfree(sn_irq_info);
+ return;
+ }
+
+ unregister_intr_pda(sn_irq_info);
+ spin_lock(&sn_irq_info_lock);
+ list_del_rcu(&sn_irq_info->list);
+ spin_unlock(&sn_irq_info_lock);
+ call_rcu(&sn_irq_info->rcu, sn_irq_info_free);
+ pci_dev_put(pci_dev);
+}
+
static void force_interrupt(int irq)
{
struct sn_irq_info *sn_irq_info;
if (!sn_ioif_inited)
return;
- sn_irq_info = sn_irq[irq];
- while (sn_irq_info) {
+
+ rcu_read_lock();
+ list_for_each_entry_rcu(sn_irq_info, sn_irq_lh[irq], list) {
if (IS_PCI_BRIDGE_ASIC(sn_irq_info->irq_bridge_type) &&
- (sn_irq_info->irq_bridge != NULL)) {
+ (sn_irq_info->irq_bridge != NULL))
pcibr_force_interrupt(sn_irq_info);
- }
- sn_irq_info = sn_irq_info->irq_next;
}
+ rcu_read_unlock();
}
/*
@@ -402,19 +395,41 @@ static void sn_check_intr(int irq, struct sn_irq_info *sn_irq_info)
void sn_lb_int_war_check(void)
{
+ struct sn_irq_info *sn_irq_info;
int i;
if (!sn_ioif_inited || pda->sn_first_irq == 0)
return;
+
+ rcu_read_lock();
for (i = pda->sn_first_irq; i <= pda->sn_last_irq; i++) {
- struct sn_irq_info *sn_irq_info = sn_irq[i];
- while (sn_irq_info) {
- /* Only call for PCI bridges that are fully initialized. */
+ list_for_each_entry_rcu(sn_irq_info, sn_irq_lh[i], list) {
+ /*
+ * Only call for PCI bridges that are fully
+ * initialized.
+ */
if (IS_PCI_BRIDGE_ASIC(sn_irq_info->irq_bridge_type) &&
- (sn_irq_info->irq_bridge != NULL)) {
+ (sn_irq_info->irq_bridge != NULL))
sn_check_intr(i, sn_irq_info);
- }
- sn_irq_info = sn_irq_info->irq_next;
}
}
+ rcu_read_unlock();
+}
+
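+/* Allocate and initialize the per-irq sn_irq_info list heads */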
+void sn_irq_lh_init(void)
+{
+ int i;
+
+ sn_irq_lh = kmalloc(sizeof(struct list_head *) * NR_IRQS, GFP_KERNEL);
+ if (!sn_irq_lh)
+ panic("SN PCI INIT: Failed to allocate memory for PCI init\n");
+
+ for (i = 0; i < NR_IRQS; i++) {
+ sn_irq_lh[i] = kmalloc(sizeof(struct list_head), GFP_KERNEL);
+ if (!sn_irq_lh[i])
+ panic("SN PCI INIT: Failed IRQ memory allocation\n");
+
+ INIT_LIST_HEAD(sn_irq_lh[i]);
+ }
+
+}