path: root/big-little/common/vgiclib.c
blob: bde8089b6abb39986c561aad2deb2749613fdd1d
/*
 * Copyright (c) 2012, ARM Limited. All rights reserved.
 *       
 * Redistribution and use in source and binary forms, with
 * or without modification, are permitted provided that the
 * following conditions are met:
 *     
 * Redistributions of source code must retain the above
 * copyright notice, this list of conditions and the 
 * following disclaimer.
 *
 * Redistributions in binary form must reproduce the
 * above copyright notice, this list of conditions and 
 * the following disclaimer in the documentation 
 * and/or other materials provided with the distribution.
 *      
 * Neither the name of ARM nor the names of its
 * contributors may be used to endorse or promote products
 * derived from this software without specific prior written
 * permission.                        
 */

#include "vgiclib.h"
#include "misc.h"
#include "virt_helpers.h"
#include "int_master.h"
#include "ipi.h"
#include "events.h"

extern unsigned get_cpuinfo(unsigned);
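
/*
 * List register descriptor layout, as interpreted by the code in this
 * file (a working summary inferred from the accessors below, not a
 * complete GICH_LR field reference):
 *   [9:0]   virtual interrupt ID
 *   [19:10] physical interrupt ID for hardware interrupts, or the
 *           requesting cpu interface for SGIs (ids 0-15)
 *   [27:20] priority field as compared by the queueing code
 *   [29:28] state bits (pending/active)
 *   [31]    HW bit: on EOI, forward a deactivation to the physical GIC
 */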

/*
 * Manage the overflow interrupt descriptors: a static pool with a
 * simple recycling allocator.
 */
#define MAXOVERFLOWINTS 200

static struct overflowint *freeoverflows[NUM_CPUS];
static struct overflowint theoverflowints[NUM_CPUS][MAXOVERFLOWINTS];
static struct gic_cpuif cpuifs[NUM_CPUS];
static unsigned hv_lr_count[NUM_CPUS] = { 0 };

static mig_irq_info migrated_irqs[NUM_CPUS][MAX_MIG_IRQS] = {0};

static void free_overflowint(struct overflowint *p, unsigned cpuid)
{
	p->next = freeoverflows[cpuid];
	freeoverflows[cpuid] = p;
}

/*
 * The vGIC architecture allows up to 64 list registers, with their
 * status spread across two 32-bit status registers (ELRSR0/1). Since
 * not all of the list registers need be implemented, this function
 * returns the highest ELRSR word index we need to consider.
 */
static inline unsigned elrsr_max_index(unsigned cpuid)
{
	return (hv_lr_count[cpuid] - 1) >> 5;
}
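
/*
 * For example, with four implemented list registers (a common
 * configuration), hv_lr_count = 4 and (4 - 1) >> 5 = 0, so only ELRSR0
 * ever needs to be scanned.
 */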

/*
 * In the HYP view, the empty list register status registers (ELRSRn)
 * report both occupied and unimplemented list registers as 0 bits.
 * This function returns a 32-bit value in which each set bit marks an
 * occupied list register: essentially the inverse of what the ELRSR
 * returns, with unimplemented registers masked out.
 */
static unsigned get_elrsr_active_bits(unsigned index, unsigned cpuid,
				      unsigned max_index)
{
	unsigned elrsr =
	    ~(read32(VGIC_HV_PHY_BASE + GICH_ELRSR0 + (index << 2)));

	if (index == max_index) {
		/*
		 * Mask off the bits for unimplemented list registers: shift 1
		 * left by the number of registers implemented in this word and
		 * subtract 1 to form the mask.
		 */
		elrsr &= (1 << (hv_lr_count[cpuid] - (32 * max_index))) - 1;
	} else if (index > max_index) {
		/*
		 * There can never be active virqs when the list registers
		 * do not exist.
		 */
		elrsr = 0;
	}

	return elrsr;
}
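
/*
 * Worked example: suppose hv_lr_count = 40, so max_index = 1. A query
 * for index 1 masks the inverted ELRSR1 with (1 << (40 - 32)) - 1 =
 * 0xff, keeping only the eight registers actually implemented in that
 * word.
 */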


/*
 * For a given interrupt and cpu id, check whether a corresponding
 * virq is pending or active. If it is, invalidate or dequeue it and
 * return its descriptor to the caller.
 */
static unsigned dequeue_virq(unsigned irq, unsigned cpu_id)
{
	unsigned list_reg = 0, max_index = elrsr_max_index(cpu_id), ctr = 0;
	unsigned cur_elrsr = 0, i = 0;
	struct gic_cpuif *cpuif = &cpuifs[cpu_id];
	struct overflowint **ovflowh;

	/* First check the hw list registers */
	for (ctr = 0; ctr <= max_index; ctr++) {
		cur_elrsr = get_elrsr_active_bits(ctr, cpu_id, max_index);

		for (i = bitindex(cur_elrsr); ((int)i) >= 0; i = bitindex(cur_elrsr)) {
			unsigned int_id = 0;

			list_reg = read32(VGIC_HV_PHY_BASE + GICH_LR_BASE + ((1 << 7) * ctr) + (i << 2));
			int_id = (list_reg >> 10) & 0x3ff;

			/* Clear the current bit */
			cur_elrsr &= ~(1 << i);

			/* Set this entry as free */
			cpuif->freelist |= (1 << i);

			if (irq == int_id) {
				/*
				 * Invalidate the list register entry if the ids match and return
				 */
				write32(VGIC_HV_PHY_BASE + GICH_LR_BASE + ((1 << 7) * ctr) + (i << 2), list_reg & ~(0x3 << 28));
				return list_reg;
			}

		}
	}

	/* Check the sw linked list for the presence of this interrupt */
	for (ovflowh = &cpuif->overflow; *ovflowh; ovflowh = &(*ovflowh)->next) {
		struct overflowint *match = *ovflowh;
		unsigned int_id = (match->value >> 10) & 0x3ff;
		unsigned type = match->value & HW_IRQ;

		if ((type == HW_IRQ) && (int_id == irq)) {
			/*
			 * Unlink the matching entry before freeing it and
			 * return its descriptor.
			 */
			list_reg = match->value;
			*ovflowh = match->next;
			free_overflowint(match, cpu_id);
			return list_reg;
		}
	}

	return 0;
}

/*
 * Given the original and current ICDIPTR values & the register's
 * offset, find the ids of the interrupts that have been migrated.
 * To do this:
 * We XOR the two values to find which bytes differ (each byte
 * corresponds to a single interrupt).
 * The offset is from the start of the ICDIPTR map (0x800)
 * & indicates the number of irqs covered so far.
 * Adding it to 'ctr' gives the exact irq id.
 *
 * Also find the previous and current cpu interface ids.
 * Lastly, find whether a virq is pending for each migrated
 * interrupt and return a cpu mask for asking the other cpus to
 * complete the migration.
 */
static unsigned set_mig_irq_info(unsigned orig, unsigned curr, unsigned icdiptr_offset)
{
	unsigned ctr, diff = orig ^ curr, cpu_mask = 0;
	unsigned desc = 0, cpu_id = read_cpuid();

	for (ctr = 0; ctr < MAX_MIG_IRQS; ctr++) {
		if ((diff >> (ctr << 3)) & 0xff) {
			migrated_irqs[cpu_id][ctr].id = icdiptr_offset + ctr;
			migrated_irqs[cpu_id][ctr].src_cpuif = bitindex((orig >> (ctr << 3)) & 0xff);
			migrated_irqs[cpu_id][ctr].dest_cpuif = bitindex((curr >> (ctr << 3)) & 0xff);
			desc = dequeue_virq(migrated_irqs[cpu_id][ctr].id, cpu_id);
			if (desc) {
				migrated_irqs[cpu_id][ctr].desc = desc;
				cpu_mask |= 1 << migrated_irqs[cpu_id][ctr].dest_cpuif;
			}
		}
	}

	return cpu_mask;
}
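
/*
 * Worked example: orig = 0x01010101, curr = 0x01020101 and
 * icdiptr_offset = 32. Only byte 2 differs, so irq 34 has moved from
 * cpu interface 0 (bit 0 set in 0x01) to cpu interface 1 (bit 1 set
 * in 0x02); if a virq for it is pending, bit 1 is added to the
 * returned cpu mask.
 */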

/*
 * Find if any virqs are pending in the list registers for the physical interrupt(s)
 * that have just been migrated. Save this information and ask the target cpu to
 * enqueue them.
 */
unsigned start_virq_migration(unsigned orig, unsigned curr, unsigned icdiptr_offset)
{
	unsigned virq_mig_mask = 0;

	/* Find the pending virqs */
	virq_mig_mask = set_mig_irq_info(orig, curr, icdiptr_offset);

	/*
	 * Ask the new target cpu interface to enqueue our
	 * pending virtual interrupts.
	 */
	if (virq_mig_mask) {
		send_hyp_ipi(virq_mig_mask, IPI_MIGRATE_VIRQS);
		wait_for_events(VIRQ_MIG_DONE, get_cpu_mask(virq_mig_mask));
		return 1;
	}

	return 0;
}
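
/*
 * The handshake: the cpu that detected the ICDIPTR change sends
 * IPI_MIGRATE_VIRQS to each new target cpu; each target re-enqueues
 * the saved descriptors in complete_virq_migration() and signals
 * VIRQ_MIG_DONE; the sender waits for those events before returning
 * from start_virq_migration().
 */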

/*
 * Pick up the migrated virqs from the cpu that sent them
 */
void complete_virq_migration(unsigned src_cpuid)
{
	unsigned ctr, cpu_id = read_cpuid();
	unsigned cluster_id = read_clusterid();
	unsigned dest_cpuid = 0;

	for (ctr = 0; ctr < MAX_MIG_IRQS; ctr++) {
		/*
		 * Compare the cpu id instead of the cpu interface id in case
		 * a switch took place before the virq migration ipi was received.
		 */
		dest_cpuid = get_cpuinfo(migrated_irqs[src_cpuid][ctr].dest_cpuif) & 0xf;
		if (migrated_irqs[src_cpuid][ctr].desc && dest_cpuid == cpu_id)
			enqueue_interrupt(migrated_irqs[src_cpuid][ctr].desc, cpu_id);
	}

	set_event(VIRQ_MIG_DONE, src_cpuid);
	return;
}


void dump_vgic_state(void)
{
	unsigned int i;

	printf("VGIC state:\n");
	printf(" Control  : 0x%x \n", read32(VGIC_HV_PHY_BASE + GICH_CTL));
	printf(" ActivePri: 0x%x \n", read32(VGIC_HV_PHY_BASE + GICH_APR0));
	for (i = 0; i < 4; i++) {
		printf(" List     : 0x%x \n",
		       read32(VGIC_HV_PHY_BASE + GICH_LR_BASE + (i * 4)));
	}
}

static struct overflowint *get_overflowint(unsigned cpuid)
{
	struct overflowint *p = freeoverflows[cpuid];

	if (!p) {
		printf("Panic: Out of overflow interrupt slots.\n");
		printf("Recompile with larger MAXOVERFLOWINTS.\n");
		panic();
	}

	freeoverflows[cpuid] = p->next;

	return p;
}

void vgic_init(void)
{
	unsigned int i;
	unsigned cpuid = read_cpuid();

	freeoverflows[cpuid] = 0x0;

	for (i = 0; i < MAXOVERFLOWINTS; i++) {
		free_overflowint(&(theoverflowints[cpuid][i]), cpuid);
	}

	/*
	 * Find the number of list registers: GICH_VTR[5:0] holds that
	 * number minus one.
	 * TODO: This will not work if individual cpus can have a different
	 * number of list registers across clusters. It would then need to
	 * be detected on each access.
	 */
	hv_lr_count[cpuid] = (read32(VGIC_HV_PHY_BASE + GICH_VTR) & 0x3f) + 1;

	/* Enable virtual interrupts & if required, maintenance interrupts */
	write32(VGIC_HV_PHY_BASE + GICH_CTL, VGICH_HCR_EN);

	return;
}

/*
 * Abstracted entry accessor functions.  Work for live or saved state 
 */
static void set_vgic_entry(unsigned int descr, unsigned int slot)
{
	write32(VGIC_HV_PHY_BASE + GICH_LR_BASE + (slot * 4), descr);
}

static unsigned int get_vgic_entry(unsigned int slot)
{
	return read32(VGIC_HV_PHY_BASE + GICH_LR_BASE + (slot * 4));
}

/*
 * Abstracted status accessor functions, as above 
 */
static void set_vgic_status(unsigned int status)
{
	write32(VGIC_HV_PHY_BASE + GICH_CTL, status);
}

static unsigned int get_vgic_status(void)
{
	return read32(VGIC_HV_PHY_BASE + GICH_CTL);
}

/*
 * Add an entry to the queue. The queue is kept in descending priority
 * (that is to say, ascending numerical priority) order.
 *
 * Static helper, only called if the interrupt is going in the queue.
 */
static void set_vgic_queue_entry(struct gic_cpuif *cpuif, unsigned int descr)
{
	unsigned int pri = (descr >> 20) & 0xFF;
	struct overflowint **oflowh, *oflowp;
	unsigned cpuid = read_cpuid();

	/*
	 * If we are queuing something and there is currently no queue, set the interrupt bit 
	 */
	if (!(cpuif->overflow))
		set_vgic_status(get_vgic_status() | 0x2);

	/*
	 * Determine insertion point, might be the end of the list 
	 */
	for (oflowh = &(cpuif->overflow); *oflowh; oflowh = &((*oflowh)->next))
		if ((*oflowh)->priority > pri)
			break;

	oflowp = get_overflowint(cpuid);
	oflowp->priority = pri;
	oflowp->value = descr;
	oflowp->next = *oflowh;
	*oflowh = oflowp;
}
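
/*
 * Example: with a queue holding priorities {0x20, 0x40}, inserting an
 * entry of priority 0x30 links it between the two, preserving
 * numerically ascending (i.e. logically descending) priority order.
 */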

void vgic_savestate(unsigned int cpu)
{
	struct gic_cpuif *cpuif = &(cpuifs[cpu]);
	unsigned int i, ctr = 0, cur_elrsr = 0;
	unsigned max_index = elrsr_max_index(cpu);

	for (ctr = 0; ctr <= max_index; ctr++) {
		/* Each set bit in the returned value marks a list register that is not inactive */
		cur_elrsr = get_elrsr_active_bits(ctr, cpu, max_index);
		cpuif->elrsr[ctr] = cur_elrsr;

		for (i = bitindex(cur_elrsr); ((int)i) >= 0;
		     i = bitindex(cur_elrsr)) {
			unsigned list_reg =
			    read32(VGIC_HV_PHY_BASE + GICH_LR_BASE +
				   ((1 << 7) * ctr) + (i << 2));
			unsigned int_id = (list_reg >> 10) & 0x3ff;

			/* Clear the saved bit index */
			cur_elrsr &= ~(1 << i);

			/*
			 * Invalidate the pending/active virtual interrupt. Since it's a
			 * shared vGIC, this irq would otherwise persist till the next
			 * switch and hence create a duplicate.
			 */
			write32(VGIC_HV_PHY_BASE + GICH_LR_BASE +
				((1 << 7) * ctr) + (i << 2),
				list_reg & ~(0x3 << 28));

			/*
			 * While saving queued IPI context, ensure that the requesting cpu
			 * interface is mapped to its counterpart on the inbound cluster.
			 */
			if (int_id < 16) {
				unsigned ob_cpuid = int_id & 0x7;
				unsigned ob_clusterid = read_clusterid();
				unsigned ib_cpuif = 0;

				ib_cpuif = get_cpuif(!ob_clusterid, ob_cpuid);
				/* Clear the cpu interface bits and place inbound cpu interface instead */
				list_reg =
				    (list_reg & ~(0x7 << 10)) | (ib_cpuif <<
								 10);
			} else if (int_id < 32) {
				/*
				 * Pending Private peripheral interrupts will be recreated from scratch
				 * so no need to save them.
				 */
				cpuif->elrsr[ctr] &= ~(1 << i);
				continue;
			}

			cpuif->ints[i] = list_reg;

		}
	}

	cpuif->status = read32(VGIC_HV_PHY_BASE + GICH_CTL);
	cpuif->activepris = read32(VGIC_HV_PHY_BASE + GICH_APR0);
	/* Empty list register status: each set bit marks a free slot */
	cpuif->freelist = read32(VGIC_HV_PHY_BASE + GICH_ELRSR0);

	write32(VGIC_HV_PHY_BASE + GICH_CTL, 0);	/* SMP */

	return;
}

void vgic_loadstate(unsigned int cpu)
{
	struct gic_cpuif *cpuif = &(cpuifs[cpu]);
	unsigned int i, ctr = 0, cur_elrsr = 0;
	unsigned max_index = elrsr_max_index(cpu);

	for (ctr = 0; ctr <= max_index; ctr++) {
		cur_elrsr = cpuif->elrsr[ctr];

		for (i = bitindex(cur_elrsr); ((int)i) >= 0;
		     i = bitindex(cur_elrsr)) {
			write32(VGIC_HV_PHY_BASE + GICH_LR_BASE +
				((1 << 7) * ctr) + (i << 2), cpuif->ints[i]);

			/* Clear the restored bit index */
			cur_elrsr &= ~(1 << i);
		}
	}

	write32(VGIC_HV_PHY_BASE + GICH_CTL, cpuif->status);
	write32(VGIC_HV_PHY_BASE + GICH_APR0, cpuif->activepris);

	return;
}

/*
 * vgic_refresh: Generic "maintenance" routine for the VGIC
 *
 * This is called:
 *  - On maintenance interrupt.  We get maintenance interrupts for
 *    two reasons:
 *    o Non-zero EOI skid.  This routine deals with the skid and sets
 *      the field to 0, quenching the interrupt source.
 *    o "Nearly empty" interrupt bit set, and nearly empty condition
 *      exists.  This interrupt source is quenched by filling the
 *      slots (and clearing the interrupt bit if the queue is now empty)
 *  - When a new interrupt arrives and the cached "free slot" value
 *    indicates that there are no free slots.  We expect to scavenge some
 *    slots from interrupts which have been completed by the VM.
 *
 * This routine is O(n) in the number of skidded EOIs + O(m) in the
 * number of interrupt slots provided - since this is constant for an
 * implementation it's really O(1).
 *
 * If this VGIC instance is currently live on a CPU it is only legal to
 * execute this routine on that CPU.
 */
void vgic_refresh(unsigned int cpu)
{
	struct gic_cpuif *cpuif = &(cpuifs[cpu]);
	unsigned int i, value, status, newstatus;
	struct overflowint **oflowh, *oflowp;

	/*
	 * Grab a copy of the status. 
	 */
	status = get_vgic_status();

	/*
	 * "newstatus" is the value to be written back if needed. Whatever
	 * happens, we will clear the slipped EOI count by the time we are done.
	 */
	newstatus = status & 0x07FFFFFF;

	/*
	 * See if there are any "slipped" EOIs 
	 */
	i = (status >> 27) & 0x1F;

	if (i) {
		/*
		 * If there are, let's deal with them.
		 *
		 * We will walk through the list of queued interrupts, deactivating the
		 * ACTIVE ones as needed until we either have no more slipped EOIs to
		 * do or run out of queued interrupts.  If we run out of queued
		 * interrupts first, that's UNPREDICTABLE behaviour (and the fault of
		 * the VM).  In this case we will just ignore the surplus EOIs.
		 *
		 * After EOI'ing, we delete the entry if it was just ACTIVE or set it
		 * to PENDING if it was PENDING+ACTIVE.
		 *
		 * Use a handle to point to the list entries to avoid the need for
		 * special cases in the loop.
		 */
		oflowh = &(cpuif->overflow);

		while (i && *oflowh) {
			value = (*oflowh)->value;
			if (value & VGIC_ENTRY_ACTIVE) {
				/*
				 * It's ACTIVE (or PENDING+ACTIVE) 
				 */
				i--;

				if (value & VGIC_ENTRY_HW) {
					/*
					 * HW bit set, so we need to pass on an EOI.  This doesn't ever
					 * happen for IPIs, so just pass on the 10-bit "Hardware ID".
					 */
					gic_deactivate_int((value >> 10) &
							   0x3FF);
				}

				if (value & VGIC_ENTRY_PENDING) {
					/*
					 * It was PENDING+ACTIVE, clear the ACTIVE bit and move on 
					 */
					(*oflowh)->value &= ~VGIC_ENTRY_ACTIVE;
				} else {
					/*
					 * It was only ACTIVE, so unlink it from the queue and delete it.
					 */
					oflowp = *oflowh;
					*oflowh = oflowp->next;
					free_overflowint(oflowp, cpu);
				}
			} else {
				/*
				 * It wasn't ACTIVE :( Try the next one. 
				 */
				oflowh = &((*oflowh)->next);
			}
		}
	}

	/*
	 * Now populate any spare slots with entries from the list (if any).  Also fix up the free slot bitmap 
	 */
	for (i = 0; i < hv_lr_count[cpu]; i++) {
		value = get_vgic_entry(i);

		if (value & 0x30000000) {
			/*
			 * This entry already contains a valid interrupt, skip 
			 */
			continue;
		}

		/*
		 * Not a valid interrupt 
		 */
		oflowp = cpuif->overflow;
		if (oflowp) {
			/*
			 * If there's a queue, move the top entry out of the queue and into
			 * this slot..
			 */
			cpuif->overflow = oflowp->next;

			set_vgic_entry(oflowp->value, i);
			free_overflowint(oflowp, cpu);
		} else {
			/*
			 * .. otherwise mark it as available. 
			 */
			cpuif->freelist |= (1 << i);
		}
	}

	/*
	 * If we now don't have any overflow, clear the status bit 
	 */
	if (!(cpuif->overflow)) {
		newstatus &= ~0x2;
	}

	/*
	 * Refresh status if needed 
	 */
	if (newstatus != status) {
		set_vgic_status(newstatus);
	}
}

/*
 * Adds the interrupt specified to the active list of the CPU specified.
 * Expected to cope with the state being live on that CPU, or not.
 *
 * It's only valid to call this on the CPU which the corresponding VCPUIF is live on.
 *
 * This is O(n) in the number of queued interrupts on the CPUIF in question.
 */
void enqueue_interrupt(unsigned int descr, unsigned int cpu)
{
	unsigned int slot;
	struct gic_cpuif *cpuif;

	cpuif = &(cpuifs[cpu]);

	/*
	 * If there are no free slots, trigger a maintenance 
	 */
	if (!(cpuif->freelist)) {
		vgic_refresh(cpu);
	}

	if (cpuif->freelist) {
		/*
		 * There is a free slot, use it. 
		 */
		slot = cpuif->freelist;	/* Take the free list.. */
		slot &= (-slot);	/* .. extract one set bit .. */
		cpuif->freelist &= (~slot);	/* .. clear that bit from free list .. */
		slot = bitindex(slot);	/* .. and convert to number. */
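		/*
		 * e.g. freelist = 0b01100: slot & -slot isolated the lowest set
		 * bit, 0b00100, and bitindex() turned that into slot number 2.
		 */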

		set_vgic_entry(descr, slot);
	} else {
		/*
		 * There are no free slots, we are either queuing this one or swapping another out 
		 */
		unsigned int pri = (descr >> 20) & 0xFF;
		unsigned int minpri = 0;
		unsigned int minslot = 0;
		unsigned int i, j;

		if (cpuif->overflow && cpuif->overflow->priority <= pri) {
			/*
			 * There are already queued interrupts with the same or higher priority, just queue this one 
			 */
			set_vgic_queue_entry(cpuif, descr);
			return;
		}

		/*
		 * Otherwise find the lowest priority entry.. 
		 */
		for (i = 0; i < hv_lr_count[cpu]; i++) {
			j = (get_vgic_entry(i) >> 20) & 0xFF;	/* Get the priority for the current thing in this slot */
			if (i == 0 || (j > minpri)) {
				minpri = j;
				minslot = i;
			}
		}

		if (minpri > pri) {
			/*
			 * If it's lower priority than this new one we kick it out 
			 */
			set_vgic_queue_entry(cpuif, get_vgic_entry(minslot));
			set_vgic_entry(descr, minslot);
		} else {
			/*
			 * Otherwise just queue the new one 
			 */
			set_vgic_queue_entry(cpuif, descr);
		}
	}
}