Marc Zyngier | cc2d321 | 2014-11-24 14:35:11 +0000 | [diff] [blame] | 1 | /* |
| 2 | * Copyright (C) 2013, 2014 ARM Limited, All Rights Reserved. |
| 3 | * Author: Marc Zyngier <marc.zyngier@arm.com> |
| 4 | * |
| 5 | * This program is free software; you can redistribute it and/or modify |
| 6 | * it under the terms of the GNU General Public License version 2 as |
| 7 | * published by the Free Software Foundation. |
| 8 | * |
| 9 | * This program is distributed in the hope that it will be useful, |
| 10 | * but WITHOUT ANY WARRANTY; without even the implied warranty of |
| 11 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the |
| 12 | * GNU General Public License for more details. |
| 13 | * |
| 14 | * You should have received a copy of the GNU General Public License |
| 15 | * along with this program. If not, see <http://www.gnu.org/licenses/>. |
| 16 | */ |
| 17 | |
| 18 | #include <linux/bitmap.h> |
| 19 | #include <linux/cpu.h> |
| 20 | #include <linux/delay.h> |
| 21 | #include <linux/interrupt.h> |
| 22 | #include <linux/log2.h> |
| 23 | #include <linux/mm.h> |
| 24 | #include <linux/msi.h> |
| 25 | #include <linux/of.h> |
| 26 | #include <linux/of_address.h> |
| 27 | #include <linux/of_irq.h> |
| 28 | #include <linux/of_pci.h> |
| 29 | #include <linux/of_platform.h> |
| 30 | #include <linux/percpu.h> |
| 31 | #include <linux/slab.h> |
| 32 | |
| 33 | #include <linux/irqchip/arm-gic-v3.h> |
| 34 | |
| 35 | #include <asm/cacheflush.h> |
| 36 | #include <asm/cputype.h> |
| 37 | #include <asm/exception.h> |
| 38 | |
| 39 | #include "irqchip.h" |
| 40 | |
| 41 | #define ITS_FLAGS_CMDQ_NEEDS_FLUSHING (1 << 0) |
| 42 | |
Marc Zyngier | c48ed51 | 2014-11-24 14:35:12 +0000 | [diff] [blame] | 43 | #define RDIST_FLAGS_PROPBASE_NEEDS_FLUSHING (1 << 0) |
| 44 | |
Marc Zyngier | cc2d321 | 2014-11-24 14:35:11 +0000 | [diff] [blame] | 45 | /* |
| 46 | * Collection structure - just an ID, and a redistributor address to |
| 47 | * ping. We use one per CPU as a bag of interrupts assigned to this |
| 48 | * CPU. |
| 49 | */ |
struct its_collection {
	u64 target_address;	/* RDbase of the redistributor to route to */
	u16 col_id;		/* ICID used in ITS commands */
};
| 54 | |
| 55 | /* |
| 56 | * The ITS structure - contains most of the infrastructure, with the |
| 57 | * msi_controller, the command queue, the collections, and the list of |
| 58 | * devices writing to it. |
| 59 | */ |
struct its_node {
	raw_spinlock_t		lock;		/* serializes command queue emission */
	struct list_head	entry;		/* link in the global ITS list */
	struct msi_controller	msi_chip;	/* MSI controller exposed to PCI */
	struct irq_domain	*domain;	/* irqdomain backing the LPIs */
	void __iomem		*base;		/* mapped ITS register frame */
	unsigned long		phys_base;	/* physical address of that frame */
	struct its_cmd_block	*cmd_base;	/* start of the command queue */
	struct its_cmd_block	*cmd_write;	/* next free slot (SW write pointer) */
	void			*tables[GITS_BASER_NR_REGS]; /* GITS_BASERn backing pages */
	struct its_collection	*collections;	/* one collection per CPU */
	struct list_head	its_device_list; /* devices attached to this ITS */
	u64			flags;		/* ITS_FLAGS_* */
	u32			ite_size;	/* bytes per Interrupt Translation Entry */
};
| 75 | |
| 76 | #define ITS_ITT_ALIGN SZ_256 |
| 77 | |
| 78 | /* |
| 79 | * The ITS view of a device - belongs to an ITS, a collection, owns an |
| 80 | * interrupt translation table, and a list of interrupts. |
| 81 | */ |
struct its_device {
	struct list_head	entry;		/* link in its->its_device_list */
	struct its_node		*its;		/* owning ITS */
	struct its_collection	*collection;	/* collection events are routed to */
	void			*itt;		/* Interrupt Translation Table memory */
	unsigned long		*lpi_map;	/* bitmap of LPIs in use by this device */
	irq_hw_number_t		lpi_base;	/* first LPI number of the device's range */
	int			nr_lpis;	/* number of LPIs in the range */
	u32			nr_ites;	/* number of ITT entries */
	u32			device_id;	/* DeviceID presented to the ITS */
};
| 93 | |
| 94 | /* |
| 95 | * ITS command descriptors - parameters to be encoded in a command |
| 96 | * block. |
| 97 | */ |
struct its_cmd_desc {
	/* One member per command type; the builder picks the right one. */
	union {
		struct {
			struct its_device *dev;
			u32 event_id;
		} its_inv_cmd;		/* INV: reload config for one event */

		struct {
			struct its_device *dev;
			u32 event_id;
		} its_int_cmd;		/* INT: generate an interrupt for one event */

		struct {
			struct its_device *dev;
			int valid;
		} its_mapd_cmd;		/* MAPD: (un)map a device and its ITT */

		struct {
			struct its_collection *col;
			int valid;
		} its_mapc_cmd;		/* MAPC: (un)map a collection to a RDbase */

		struct {
			struct its_device *dev;
			u32 phys_id;
			u32 event_id;
		} its_mapvi_cmd;	/* MAPVI: map an event to a physical LPI */

		struct {
			struct its_device *dev;
			struct its_collection *col;
			u32 id;
		} its_movi_cmd;		/* MOVI: retarget an event to a collection */

		struct {
			struct its_device *dev;
			u32 event_id;
		} its_discard_cmd;	/* DISCARD: unmap a single event */

		struct {
			struct its_collection *col;
		} its_invall_cmd;	/* INVALL: reload config for a whole collection */
	};
};
| 142 | |
| 143 | /* |
| 144 | * The ITS command block, which is what the ITS actually parses. |
| 145 | */ |
struct its_cmd_block {
	u64	raw_cmd[4];	/* 32-byte command, 4 little-endian doublewords */
};
| 149 | |
| 150 | #define ITS_CMD_QUEUE_SZ SZ_64K |
| 151 | #define ITS_CMD_QUEUE_NR_ENTRIES (ITS_CMD_QUEUE_SZ / sizeof(struct its_cmd_block)) |
| 152 | |
| 153 | typedef struct its_collection *(*its_cmd_builder_t)(struct its_cmd_block *, |
| 154 | struct its_cmd_desc *); |
| 155 | |
| 156 | static void its_encode_cmd(struct its_cmd_block *cmd, u8 cmd_nr) |
| 157 | { |
| 158 | cmd->raw_cmd[0] &= ~0xffUL; |
| 159 | cmd->raw_cmd[0] |= cmd_nr; |
| 160 | } |
| 161 | |
| 162 | static void its_encode_devid(struct its_cmd_block *cmd, u32 devid) |
| 163 | { |
| 164 | cmd->raw_cmd[0] &= ~(0xffffUL << 32); |
| 165 | cmd->raw_cmd[0] |= ((u64)devid) << 32; |
| 166 | } |
| 167 | |
| 168 | static void its_encode_event_id(struct its_cmd_block *cmd, u32 id) |
| 169 | { |
| 170 | cmd->raw_cmd[1] &= ~0xffffffffUL; |
| 171 | cmd->raw_cmd[1] |= id; |
| 172 | } |
| 173 | |
/*
 * Set the physical LPI number in bits [63:32] of DW1.
 * Note: "&= 0xffffffffUL" (no '~') is intentional — it keeps the low
 * 32 bits (the EventID field) and clears the upper half being written.
 */
static void its_encode_phys_id(struct its_cmd_block *cmd, u32 phys_id)
{
	cmd->raw_cmd[1] &= 0xffffffffUL;
	cmd->raw_cmd[1] |= ((u64)phys_id) << 32;
}
| 179 | |
| 180 | static void its_encode_size(struct its_cmd_block *cmd, u8 size) |
| 181 | { |
| 182 | cmd->raw_cmd[1] &= ~0x1fUL; |
| 183 | cmd->raw_cmd[1] |= size & 0x1f; |
| 184 | } |
| 185 | |
/*
 * Set the ITT base address in DW2.  The field spans bits [47:8] (the
 * ITT is 256-byte aligned, so the low 8 address bits are dropped).
 */
static void its_encode_itt(struct its_cmd_block *cmd, u64 itt_addr)
{
	cmd->raw_cmd[2] &= ~0xffffffffffffUL;
	cmd->raw_cmd[2] |= itt_addr & 0xffffffffff00UL;
}
| 191 | |
| 192 | static void its_encode_valid(struct its_cmd_block *cmd, int valid) |
| 193 | { |
| 194 | cmd->raw_cmd[2] &= ~(1UL << 63); |
| 195 | cmd->raw_cmd[2] |= ((u64)!!valid) << 63; |
| 196 | } |
| 197 | |
| 198 | static void its_encode_target(struct its_cmd_block *cmd, u64 target_addr) |
| 199 | { |
| 200 | cmd->raw_cmd[2] &= ~(0xffffffffUL << 16); |
| 201 | cmd->raw_cmd[2] |= (target_addr & (0xffffffffUL << 16)); |
| 202 | } |
| 203 | |
| 204 | static void its_encode_collection(struct its_cmd_block *cmd, u16 col) |
| 205 | { |
| 206 | cmd->raw_cmd[2] &= ~0xffffUL; |
| 207 | cmd->raw_cmd[2] |= col; |
| 208 | } |
| 209 | |
/*
 * Convert the command to the little-endian layout the ITS expects.
 * A no-op on LE kernels; swaps each doubleword on BE.
 */
static inline void its_fixup_cmd(struct its_cmd_block *cmd)
{
	/* Let's fixup BE commands */
	cmd->raw_cmd[0] = cpu_to_le64(cmd->raw_cmd[0]);
	cmd->raw_cmd[1] = cpu_to_le64(cmd->raw_cmd[1]);
	cmd->raw_cmd[2] = cpu_to_le64(cmd->raw_cmd[2]);
	cmd->raw_cmd[3] = cpu_to_le64(cmd->raw_cmd[3]);
}
| 218 | |
/*
 * Build a MAPD command: (un)map @dev and point the ITS at its ITT.
 * Returns the device's collection so the caller emits a SYNC to it.
 */
static struct its_collection *its_build_mapd_cmd(struct its_cmd_block *cmd,
						 struct its_cmd_desc *desc)
{
	unsigned long itt_addr;
	/* The size field encodes (number of EventID bits - 1). */
	u8 size = order_base_2(desc->its_mapd_cmd.dev->nr_ites);
	/*
	 * NOTE(review): nr_ites == 1 would give size == 0, making
	 * "size - 1" below wrap to 0xff — presumably callers guarantee
	 * nr_ites >= 2; verify at the allocation site.
	 */

	itt_addr = virt_to_phys(desc->its_mapd_cmd.dev->itt);
	itt_addr = ALIGN(itt_addr, ITS_ITT_ALIGN);

	its_encode_cmd(cmd, GITS_CMD_MAPD);
	its_encode_devid(cmd, desc->its_mapd_cmd.dev->device_id);
	its_encode_size(cmd, size - 1);
	its_encode_itt(cmd, itt_addr);
	its_encode_valid(cmd, desc->its_mapd_cmd.valid);

	its_fixup_cmd(cmd);

	return desc->its_mapd_cmd.dev->collection;
}
| 238 | |
| 239 | static struct its_collection *its_build_mapc_cmd(struct its_cmd_block *cmd, |
| 240 | struct its_cmd_desc *desc) |
| 241 | { |
| 242 | its_encode_cmd(cmd, GITS_CMD_MAPC); |
| 243 | its_encode_collection(cmd, desc->its_mapc_cmd.col->col_id); |
| 244 | its_encode_target(cmd, desc->its_mapc_cmd.col->target_address); |
| 245 | its_encode_valid(cmd, desc->its_mapc_cmd.valid); |
| 246 | |
| 247 | its_fixup_cmd(cmd); |
| 248 | |
| 249 | return desc->its_mapc_cmd.col; |
| 250 | } |
| 251 | |
| 252 | static struct its_collection *its_build_mapvi_cmd(struct its_cmd_block *cmd, |
| 253 | struct its_cmd_desc *desc) |
| 254 | { |
| 255 | its_encode_cmd(cmd, GITS_CMD_MAPVI); |
| 256 | its_encode_devid(cmd, desc->its_mapvi_cmd.dev->device_id); |
| 257 | its_encode_event_id(cmd, desc->its_mapvi_cmd.event_id); |
| 258 | its_encode_phys_id(cmd, desc->its_mapvi_cmd.phys_id); |
| 259 | its_encode_collection(cmd, desc->its_mapvi_cmd.dev->collection->col_id); |
| 260 | |
| 261 | its_fixup_cmd(cmd); |
| 262 | |
| 263 | return desc->its_mapvi_cmd.dev->collection; |
| 264 | } |
| 265 | |
| 266 | static struct its_collection *its_build_movi_cmd(struct its_cmd_block *cmd, |
| 267 | struct its_cmd_desc *desc) |
| 268 | { |
| 269 | its_encode_cmd(cmd, GITS_CMD_MOVI); |
| 270 | its_encode_devid(cmd, desc->its_movi_cmd.dev->device_id); |
| 271 | its_encode_event_id(cmd, desc->its_movi_cmd.id); |
| 272 | its_encode_collection(cmd, desc->its_movi_cmd.col->col_id); |
| 273 | |
| 274 | its_fixup_cmd(cmd); |
| 275 | |
| 276 | return desc->its_movi_cmd.dev->collection; |
| 277 | } |
| 278 | |
| 279 | static struct its_collection *its_build_discard_cmd(struct its_cmd_block *cmd, |
| 280 | struct its_cmd_desc *desc) |
| 281 | { |
| 282 | its_encode_cmd(cmd, GITS_CMD_DISCARD); |
| 283 | its_encode_devid(cmd, desc->its_discard_cmd.dev->device_id); |
| 284 | its_encode_event_id(cmd, desc->its_discard_cmd.event_id); |
| 285 | |
| 286 | its_fixup_cmd(cmd); |
| 287 | |
| 288 | return desc->its_discard_cmd.dev->collection; |
| 289 | } |
| 290 | |
| 291 | static struct its_collection *its_build_inv_cmd(struct its_cmd_block *cmd, |
| 292 | struct its_cmd_desc *desc) |
| 293 | { |
| 294 | its_encode_cmd(cmd, GITS_CMD_INV); |
| 295 | its_encode_devid(cmd, desc->its_inv_cmd.dev->device_id); |
| 296 | its_encode_event_id(cmd, desc->its_inv_cmd.event_id); |
| 297 | |
| 298 | its_fixup_cmd(cmd); |
| 299 | |
| 300 | return desc->its_inv_cmd.dev->collection; |
| 301 | } |
| 302 | |
| 303 | static struct its_collection *its_build_invall_cmd(struct its_cmd_block *cmd, |
| 304 | struct its_cmd_desc *desc) |
| 305 | { |
| 306 | its_encode_cmd(cmd, GITS_CMD_INVALL); |
| 307 | its_encode_collection(cmd, desc->its_mapc_cmd.col->col_id); |
| 308 | |
| 309 | its_fixup_cmd(cmd); |
| 310 | |
| 311 | return NULL; |
| 312 | } |
| 313 | |
| 314 | static u64 its_cmd_ptr_to_offset(struct its_node *its, |
| 315 | struct its_cmd_block *ptr) |
| 316 | { |
| 317 | return (ptr - its->cmd_base) * sizeof(*ptr); |
| 318 | } |
| 319 | |
| 320 | static int its_queue_full(struct its_node *its) |
| 321 | { |
| 322 | int widx; |
| 323 | int ridx; |
| 324 | |
| 325 | widx = its->cmd_write - its->cmd_base; |
| 326 | ridx = readl_relaxed(its->base + GITS_CREADR) / sizeof(struct its_cmd_block); |
| 327 | |
| 328 | /* This is incredibly unlikely to happen, unless the ITS locks up. */ |
| 329 | if (((widx + 1) % ITS_CMD_QUEUE_NR_ENTRIES) == ridx) |
| 330 | return 1; |
| 331 | |
| 332 | return 0; |
| 333 | } |
| 334 | |
/*
 * Grab the next free slot in the command queue, busy-waiting (up to
 * about 1s) for the ITS to drain if the queue is full.  Returns NULL
 * on timeout.  Caller must hold its->lock.
 */
static struct its_cmd_block *its_allocate_entry(struct its_node *its)
{
	struct its_cmd_block *cmd;
	u32 count = 1000000;	/* 1s! */

	while (its_queue_full(its)) {
		count--;
		if (!count) {
			pr_err_ratelimited("ITS queue not draining\n");
			return NULL;
		}
		cpu_relax();
		udelay(1);
	}

	cmd = its->cmd_write++;

	/* Handle queue wrapping */
	if (its->cmd_write == (its->cmd_base + ITS_CMD_QUEUE_NR_ENTRIES))
		its->cmd_write = its->cmd_base;

	return cmd;
}
| 358 | |
| 359 | static struct its_cmd_block *its_post_commands(struct its_node *its) |
| 360 | { |
| 361 | u64 wr = its_cmd_ptr_to_offset(its, its->cmd_write); |
| 362 | |
| 363 | writel_relaxed(wr, its->base + GITS_CWRITER); |
| 364 | |
| 365 | return its->cmd_write; |
| 366 | } |
| 367 | |
/*
 * Make one command visible to the ITS: flush the cache line when the
 * ITS is not coherent with the CPU (ITS_FLAGS_CMDQ_NEEDS_FLUSHING),
 * otherwise a store barrier is enough.
 */
static void its_flush_cmd(struct its_node *its, struct its_cmd_block *cmd)
{
	/*
	 * Make sure the commands written to memory are observable by
	 * the ITS.
	 */
	if (its->flags & ITS_FLAGS_CMDQ_NEEDS_FLUSHING)
		__flush_dcache_area(cmd, sizeof(*cmd));
	else
		dsb(ishst);
}
| 379 | |
/*
 * Poll GITS_CREADR until the ITS has consumed the commands in
 * [from, to), or give up after roughly 1s.  The "rd_idx < from_idx"
 * test catches the read pointer having wrapped past the range.
 */
static void its_wait_for_range_completion(struct its_node *its,
					  struct its_cmd_block *from,
					  struct its_cmd_block *to)
{
	u64 rd_idx, from_idx, to_idx;
	u32 count = 1000000;	/* 1s! */

	from_idx = its_cmd_ptr_to_offset(its, from);
	to_idx = its_cmd_ptr_to_offset(its, to);

	while (1) {
		rd_idx = readl_relaxed(its->base + GITS_CREADR);
		if (rd_idx >= to_idx || rd_idx < from_idx)
			break;

		count--;
		if (!count) {
			pr_err_ratelimited("ITS queue timeout\n");
			return;
		}
		cpu_relax();
		udelay(1);
	}
}
| 404 | |
/*
 * Queue one command built by @builder, optionally followed by a SYNC
 * targeting the collection the builder returns (a NULL return means no
 * SYNC is needed).  Publishes the write pointer, then waits outside
 * the lock for the ITS to consume the command(s).  Errors are logged
 * and the command silently dropped.
 */
static void its_send_single_command(struct its_node *its,
				    its_cmd_builder_t builder,
				    struct its_cmd_desc *desc)
{
	struct its_cmd_block *cmd, *sync_cmd, *next_cmd;
	struct its_collection *sync_col;

	raw_spin_lock(&its->lock);

	cmd = its_allocate_entry(its);
	if (!cmd) {		/* We're soooooo screewed... */
		pr_err_ratelimited("ITS can't allocate, dropping command\n");
		raw_spin_unlock(&its->lock);
		return;
	}
	sync_col = builder(cmd, desc);
	its_flush_cmd(its, cmd);

	if (sync_col) {
		sync_cmd = its_allocate_entry(its);
		if (!sync_cmd) {
			/* The main command is already queued; post it anyway. */
			pr_err_ratelimited("ITS can't SYNC, skipping\n");
			goto post;
		}
		its_encode_cmd(sync_cmd, GITS_CMD_SYNC);
		its_encode_target(sync_cmd, sync_col->target_address);
		its_fixup_cmd(sync_cmd);
		its_flush_cmd(its, sync_cmd);
	}

post:
	next_cmd = its_post_commands(its);
	raw_spin_unlock(&its->lock);

	its_wait_for_range_completion(its, cmd, next_cmd);
}
| 441 | |
| 442 | static void its_send_inv(struct its_device *dev, u32 event_id) |
| 443 | { |
| 444 | struct its_cmd_desc desc; |
| 445 | |
| 446 | desc.its_inv_cmd.dev = dev; |
| 447 | desc.its_inv_cmd.event_id = event_id; |
| 448 | |
| 449 | its_send_single_command(dev->its, its_build_inv_cmd, &desc); |
| 450 | } |
| 451 | |
| 452 | static void its_send_mapd(struct its_device *dev, int valid) |
| 453 | { |
| 454 | struct its_cmd_desc desc; |
| 455 | |
| 456 | desc.its_mapd_cmd.dev = dev; |
| 457 | desc.its_mapd_cmd.valid = !!valid; |
| 458 | |
| 459 | its_send_single_command(dev->its, its_build_mapd_cmd, &desc); |
| 460 | } |
| 461 | |
| 462 | static void its_send_mapc(struct its_node *its, struct its_collection *col, |
| 463 | int valid) |
| 464 | { |
| 465 | struct its_cmd_desc desc; |
| 466 | |
| 467 | desc.its_mapc_cmd.col = col; |
| 468 | desc.its_mapc_cmd.valid = !!valid; |
| 469 | |
| 470 | its_send_single_command(its, its_build_mapc_cmd, &desc); |
| 471 | } |
| 472 | |
| 473 | static void its_send_mapvi(struct its_device *dev, u32 irq_id, u32 id) |
| 474 | { |
| 475 | struct its_cmd_desc desc; |
| 476 | |
| 477 | desc.its_mapvi_cmd.dev = dev; |
| 478 | desc.its_mapvi_cmd.phys_id = irq_id; |
| 479 | desc.its_mapvi_cmd.event_id = id; |
| 480 | |
| 481 | its_send_single_command(dev->its, its_build_mapvi_cmd, &desc); |
| 482 | } |
| 483 | |
| 484 | static void its_send_movi(struct its_device *dev, |
| 485 | struct its_collection *col, u32 id) |
| 486 | { |
| 487 | struct its_cmd_desc desc; |
| 488 | |
| 489 | desc.its_movi_cmd.dev = dev; |
| 490 | desc.its_movi_cmd.col = col; |
| 491 | desc.its_movi_cmd.id = id; |
| 492 | |
| 493 | its_send_single_command(dev->its, its_build_movi_cmd, &desc); |
| 494 | } |
| 495 | |
| 496 | static void its_send_discard(struct its_device *dev, u32 id) |
| 497 | { |
| 498 | struct its_cmd_desc desc; |
| 499 | |
| 500 | desc.its_discard_cmd.dev = dev; |
| 501 | desc.its_discard_cmd.event_id = id; |
| 502 | |
| 503 | its_send_single_command(dev->its, its_build_discard_cmd, &desc); |
| 504 | } |
| 505 | |
| 506 | static void its_send_invall(struct its_node *its, struct its_collection *col) |
| 507 | { |
| 508 | struct its_cmd_desc desc; |
| 509 | |
| 510 | desc.its_invall_cmd.col = col; |
| 511 | |
| 512 | its_send_single_command(its, its_build_invall_cmd, &desc); |
| 513 | } |
Marc Zyngier | c48ed51 | 2014-11-24 14:35:12 +0000 | [diff] [blame] | 514 | |
| 515 | /* |
| 516 | * irqchip functions - assumes MSI, mostly. |
| 517 | */ |
| 518 | |
| 519 | static inline u32 its_get_event_id(struct irq_data *d) |
| 520 | { |
| 521 | struct its_device *its_dev = irq_data_get_irq_chip_data(d); |
| 522 | return d->hwirq - its_dev->lpi_base; |
| 523 | } |
| 524 | |
/*
 * Set or clear the enable bit of one LPI in the shared property table,
 * make the change visible to the redistributors, and issue an INV so
 * the ITS reloads the configuration.
 */
static void lpi_set_config(struct irq_data *d, bool enable)
{
	struct its_device *its_dev = irq_data_get_irq_chip_data(d);
	irq_hw_number_t hwirq = d->hwirq;
	u32 id = its_get_event_id(d);
	/* One config byte per LPI; the table starts at LPI 8192. */
	u8 *cfg = page_address(gic_rdists->prop_page) + hwirq - 8192;

	if (enable)
		*cfg |= LPI_PROP_ENABLED;
	else
		*cfg &= ~LPI_PROP_ENABLED;

	/*
	 * Make the above write visible to the redistributors.
	 * And yes, we're flushing exactly: One. Single. Byte.
	 * Humpf...
	 */
	if (gic_rdists->flags & RDIST_FLAGS_PROPBASE_NEEDS_FLUSHING)
		__flush_dcache_area(cfg, sizeof(*cfg));
	else
		dsb(ishst);
	its_send_inv(its_dev, id);
}
| 548 | |
/* irq_chip callback: mask an LPI by clearing its enable bit. */
static void its_mask_irq(struct irq_data *d)
{
	lpi_set_config(d, false);
}
| 553 | |
/* irq_chip callback: unmask an LPI by setting its enable bit. */
static void its_unmask_irq(struct irq_data *d)
{
	lpi_set_config(d, true);
}
| 558 | |
/* irq_chip callback: signal end-of-interrupt to the CPU interface. */
static void its_eoi_irq(struct irq_data *d)
{
	gic_write_eoir(d->hwirq);
}
| 563 | |
/*
 * irq_chip callback: move an LPI to a CPU in @mask_val by issuing a
 * MOVI to that CPU's collection.  Note this updates the whole device's
 * collection pointer, so all events of the device follow.
 */
static int its_set_affinity(struct irq_data *d, const struct cpumask *mask_val,
			    bool force)
{
	unsigned int cpu = cpumask_any_and(mask_val, cpu_online_mask);
	struct its_device *its_dev = irq_data_get_irq_chip_data(d);
	struct its_collection *target_col;
	u32 id = its_get_event_id(d);

	/* No online CPU in the requested mask. */
	if (cpu >= nr_cpu_ids)
		return -EINVAL;

	target_col = &its_dev->its->collections[cpu];
	its_send_movi(its_dev, target_col, id);
	its_dev->collection = target_col;

	/* DONE: the core must not also write the effective affinity. */
	return IRQ_SET_MASK_OK_DONE;
}
| 581 | |
/* irq_chip driving LPIs: mask/unmask via the property table, EOI via
 * the CPU interface, affinity via MOVI commands. */
static struct irq_chip its_irq_chip = {
	.name			= "ITS",
	.irq_mask		= its_mask_irq,
	.irq_unmask		= its_unmask_irq,
	.irq_eoi		= its_eoi_irq,
	.irq_set_affinity	= its_set_affinity,
};
Marc Zyngier | bf9529f | 2014-11-24 14:35:13 +0000 | [diff] [blame^] | 589 | |
| 590 | /* |
| 591 | * How we allocate LPIs: |
| 592 | * |
| 593 | * The GIC has id_bits bits for interrupt identifiers. From there, we |
| 594 | * must subtract 8192 which are reserved for SGIs/PPIs/SPIs. Then, as |
| 595 | * we allocate LPIs by chunks of 32, we can shift the whole thing by 5 |
| 596 | * bits to the right. |
| 597 | * |
| 598 | * This gives us (((1UL << id_bits) - 8192) >> 5) possible allocations. |
| 599 | */ |
| 600 | #define IRQS_PER_CHUNK_SHIFT 5 |
| 601 | #define IRQS_PER_CHUNK (1 << IRQS_PER_CHUNK_SHIFT) |
| 602 | |
| 603 | static unsigned long *lpi_bitmap; |
| 604 | static u32 lpi_chunks; |
| 605 | static DEFINE_SPINLOCK(lpi_lock); |
| 606 | |
| 607 | static int its_lpi_to_chunk(int lpi) |
| 608 | { |
| 609 | return (lpi - 8192) >> IRQS_PER_CHUNK_SHIFT; |
| 610 | } |
| 611 | |
| 612 | static int its_chunk_to_lpi(int chunk) |
| 613 | { |
| 614 | return (chunk << IRQS_PER_CHUNK_SHIFT) + 8192; |
| 615 | } |
| 616 | |
| 617 | static int its_lpi_init(u32 id_bits) |
| 618 | { |
| 619 | lpi_chunks = its_lpi_to_chunk(1UL << id_bits); |
| 620 | |
| 621 | lpi_bitmap = kzalloc(BITS_TO_LONGS(lpi_chunks) * sizeof(long), |
| 622 | GFP_KERNEL); |
| 623 | if (!lpi_bitmap) { |
| 624 | lpi_chunks = 0; |
| 625 | return -ENOMEM; |
| 626 | } |
| 627 | |
| 628 | pr_info("ITS: Allocated %d chunks for LPIs\n", (int)lpi_chunks); |
| 629 | return 0; |
| 630 | } |
| 631 | |
/*
 * Allocate a contiguous range of LPI chunks large enough for @nr_irqs,
 * shrinking the request one chunk at a time if the bitmap is
 * fragmented.  On success returns a fresh zeroed per-device bitmap and
 * fills *base (first LPI) and *nr_ids (LPIs actually granted, possibly
 * fewer than requested).  Returns NULL on exhaustion or OOM.
 */
static unsigned long *its_lpi_alloc_chunks(int nr_irqs, int *base, int *nr_ids)
{
	unsigned long *bitmap = NULL;
	int chunk_id;
	int nr_chunks;
	int i;

	nr_chunks = DIV_ROUND_UP(nr_irqs, IRQS_PER_CHUNK);

	spin_lock(&lpi_lock);

	do {
		chunk_id = bitmap_find_next_zero_area(lpi_bitmap, lpi_chunks,
						      0, nr_chunks, 0);
		if (chunk_id < lpi_chunks)
			break;

		/* No room at this size; retry with one chunk fewer. */
		nr_chunks--;
	} while (nr_chunks > 0);

	if (!nr_chunks)
		goto out;

	/* GFP_ATOMIC: we're under the lpi_lock spinlock. */
	bitmap = kzalloc(BITS_TO_LONGS(nr_chunks * IRQS_PER_CHUNK) * sizeof (long),
			 GFP_ATOMIC);
	if (!bitmap)
		goto out;

	/* Mark the chunks busy in the global allocator. */
	for (i = 0; i < nr_chunks; i++)
		set_bit(chunk_id + i, lpi_bitmap);

	*base = its_chunk_to_lpi(chunk_id);
	*nr_ids = nr_chunks * IRQS_PER_CHUNK;

out:
	spin_unlock(&lpi_lock);

	return bitmap;
}
| 671 | |
| 672 | static void its_lpi_free(unsigned long *bitmap, int base, int nr_ids) |
| 673 | { |
| 674 | int lpi; |
| 675 | |
| 676 | spin_lock(&lpi_lock); |
| 677 | |
| 678 | for (lpi = base; lpi < (base + nr_ids); lpi += IRQS_PER_CHUNK) { |
| 679 | int chunk = its_lpi_to_chunk(lpi); |
| 680 | BUG_ON(chunk > lpi_chunks); |
| 681 | if (test_bit(chunk, lpi_bitmap)) { |
| 682 | clear_bit(chunk, lpi_bitmap); |
| 683 | } else { |
| 684 | pr_err("Bad LPI chunk %d\n", chunk); |
| 685 | } |
| 686 | } |
| 687 | |
| 688 | spin_unlock(&lpi_lock); |
| 689 | |
| 690 | kfree(bitmap); |
| 691 | } |