/*
 *	linux/arch/ia64/kernel/irq.c
 *
 *	Copyright (C) 1992, 1998 Linus Torvalds, Ingo Molnar
 *
 * This file contains the code used by various IRQ handling routines:
 * asking for different IRQs should be done through these routines
 * instead of just grabbing them. Thus setups with different IRQ numbers
 * shouldn't result in any weird surprises, and installing new handlers
 * should be easier.
 *
 * Copyright (C) Ashok Raj <ashok.raj@intel.com>, Intel Corporation 2004
 *
 * 4/14/2004: Added code to handle cpu migration and do safe irq
 *			migration without losing interrupts for the iosapic
 *			architecture.
 */

#include <asm/delay.h>
#include <asm/uaccess.h>
#include <linux/module.h>
#include <linux/seq_file.h>
#include <linux/interrupt.h>
#include <linux/kernel_stat.h>

/*
 * 'What should we do if we get a hw irq event on an illegal vector?'
 * Each architecture has to answer this for itself.
 */
void ack_bad_irq(unsigned int irq)
{
	printk(KERN_ERR "Unexpected irq vector 0x%x on CPU %u!\n", irq, smp_processor_id());
}

#ifdef CONFIG_IA64_GENERIC
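/*
 * Generic-build mappings between Linux irq numbers and ia64 interrupt
 * vectors: irq_cfg[] records the vector assigned to each irq, and the
 * per-cpu vector_irq[] table provides the reverse lookup.
 */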
ia64_vector __ia64_irq_to_vector(int irq)
{
	return irq_cfg[irq].vector;
}

unsigned int __ia64_local_vector_to_irq (ia64_vector vec)
{
	return __get_cpu_var(vector_irq)[vec];
}
#endif

/*
 * Interrupt statistics:
 */

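/* Bad/unexpected interrupt count, reported as the ERR: line in /proc/interrupts. */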
atomic_t irq_err_count;

/*
 * /proc/interrupts printing:
 */

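/*
 * seq_file show routine for /proc/interrupts: prints the CPU header row
 * once, then one line per populated irq with its per-cpu counts, the
 * irqchip name and every registered handler name, and finally the
 * error-interrupt count.
 */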
int show_interrupts(struct seq_file *p, void *v)
{
	int i = *(loff_t *) v, j;
	struct irqaction * action;
	unsigned long flags;

	if (i == 0) {
		char cpuname[16];
		seq_printf(p, "     ");
		for_each_online_cpu(j) {
			snprintf(cpuname, sizeof(cpuname), "CPU%d", j);
			seq_printf(p, "%10s ", cpuname);
		}
		seq_putc(p, '\n');
	}

	if (i < NR_IRQS) {
		spin_lock_irqsave(&irq_desc[i].lock, flags);
		action = irq_desc[i].action;
		if (!action)
			goto skip;
		seq_printf(p, "%3d: ", i);
#ifndef CONFIG_SMP
		seq_printf(p, "%10u ", kstat_irqs(i));
#else
		for_each_online_cpu(j) {
			seq_printf(p, "%10u ", kstat_irqs_cpu(i, j));
		}
#endif
		seq_printf(p, " %14s", irq_desc[i].chip->name);
		seq_printf(p, "  %s", action->name);

		for (action=action->next; action; action = action->next)
			seq_printf(p, ", %s", action->name);

		seq_putc(p, '\n');
skip:
		spin_unlock_irqrestore(&irq_desc[i].lock, flags);
	} else if (i == NR_IRQS)
		seq_printf(p, "ERR: %10u\n", atomic_read(&irq_err_count));
	return 0;
}

#ifdef CONFIG_SMP
static char irq_redir[NR_IRQS]; /* = { [0 ... NR_IRQS-1] = 1 }; */

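/*
 * Record where @irq has been routed: set the descriptor's affinity mask
 * to the logical cpu matching @hwid and remember the redirection hint
 * in irq_redir[].
 */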
void set_irq_affinity_info (unsigned int irq, int hwid, int redir)
{
	if (irq < NR_IRQS) {
		cpumask_copy(irq_desc[irq].affinity,
			     cpumask_of(cpu_logical_id(hwid)));
		irq_redir[irq] = (char) (redir & 0xff);
	}
}

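/*
 * Validate a user-supplied smp_affinity mask before it is applied; sn2
 * hardware can only target an interrupt at a single cpu, so masks naming
 * more than one cpu are rejected there.
 */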
bool is_affinity_mask_valid(const struct cpumask *cpumask)
{
	if (ia64_platform_is("sn2")) {
		/* Only allow one CPU to be specified in the smp_affinity mask */
		if (cpumask_weight(cpumask) != 1)
			return false;
	}
	return true;
}

#endif /* CONFIG_SMP */

#ifdef CONFIG_HOTPLUG_CPU
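/*
 * irqs whose migration was begun by migrate_irqs(); fixup_irqs() later
 * replays them through generic_handle_irq() in case the device fired
 * while its RTE was being reprogrammed.
 */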
unsigned int vectors_in_migration[NR_IRQS];

/*
 * Since cpu_online_mask has already been updated, we just need to find
 * irqs whose affinity mask no longer contains any online cpu.
 */
static void migrate_irqs(void)
{
	struct irq_desc *desc;
	int 		irq, new_cpu;

	for (irq=0; irq < NR_IRQS; irq++) {
		desc = irq_desc + irq;

		if (desc->status & IRQ_DISABLED)
			continue;

		/*
		 * No handling for now.
		 * TBD: Implement a disable function so we can tell the CPU
		 * not to respond to these local intr sources, such as
		 * ITV, CPEI, MCA, etc.
		 */
		if (desc->status & IRQ_PER_CPU)
			continue;

		if (cpumask_any_and(irq_desc[irq].affinity, cpu_online_mask)
		    >= nr_cpu_ids) {
			/*
			 * Save it for phase 2 processing
			 */
			vectors_in_migration[irq] = irq;

			new_cpu = cpumask_any(cpu_online_mask);

			/*
			 * All three callbacks are essential; currently we just
			 * WARN_ON if any is missing -- maybe this should panic?
			 */
			if (desc->chip && desc->chip->disable &&
				desc->chip->enable && desc->chip->set_affinity) {
				desc->chip->disable(irq);
				desc->chip->set_affinity(irq,
							 cpumask_of(new_cpu));
				desc->chip->enable(irq);
			} else {
				WARN_ON((!(desc->chip) || !(desc->chip->disable) ||
						!(desc->chip->enable) ||
						!(desc->chip->set_affinity)));
			}
		}
	}
}

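/*
 * Called on a cpu that is being taken offline: hand off the time-keeper
 * role if this cpu holds it, migrate bound irqs away (phase 1), drain
 * interrupts already latched locally (phase 2), replay any that may have
 * been lost while RTEs were rewritten (phase 3), then stop accepting
 * external interrupts on this cpu.
 */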
void fixup_irqs(void)
{
	unsigned int irq;
	extern void ia64_process_pending_intr(void);
	extern volatile int time_keeper_id;

	/* Mask ITV to disable timer */
	ia64_set_itv(1 << 16);

	/*
	 * Find a new timesync master
	 */
	if (smp_processor_id() == time_keeper_id) {
		time_keeper_id = cpumask_first(cpu_online_mask);
		printk(KERN_INFO "CPU %d is now promoted to time-keeper master\n",
		       time_keeper_id);
	}

	/*
	 * Phase 1: Locate IRQs bound to this cpu and
	 * relocate them for cpu removal.
	 */
	migrate_irqs();

	/*
	 * Phase 2: Perform interrupt processing for all entries reported in
	 * local APIC.
	 */
	ia64_process_pending_intr();

	/*
	 * Phase 3: Now handle any interrupts not captured in the local APIC.
	 * This accounts for cases where a device interrupted while its RTE
	 * was being disabled and re-programmed.
	 */
	for (irq=0; irq < NR_IRQS; irq++) {
		if (vectors_in_migration[irq]) {
			struct pt_regs *old_regs = set_irq_regs(NULL);

			vectors_in_migration[irq]=0;
			generic_handle_irq(irq);
			set_irq_regs(old_regs);
		}
	}

	/*
	 * Now let the processor die. We disable interrupts and raise the XTP
	 * (max_xtp()) to ensure that no more interrupts are routed to this
	 * processor. The local timer interrupt may still have one instance
	 * pending, which is taken care of in timer_interrupt().
	 */
	max_xtp();
	local_irq_disable();
}
#endif