/*
 * Copyright (C) 2013, 2014 ARM Limited, All Rights Reserved.
 * Author: Marc Zyngier <marc.zyngier@arm.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program.  If not, see <http://www.gnu.org/licenses/>.
 */
#include <linux/bitmap.h>
#include <linux/cpu.h>
#include <linux/delay.h>
#include <linux/interrupt.h>
#include <linux/log2.h>
#include <linux/mm.h>
#include <linux/msi.h>
#include <linux/of.h>
#include <linux/of_address.h>
#include <linux/of_irq.h>
#include <linux/of_pci.h>
#include <linux/of_platform.h>
#include <linux/percpu.h>
#include <linux/slab.h>

#include <linux/irqchip.h>
#include <linux/irqchip/arm-gic-v3.h>

#include <asm/cacheflush.h>
#include <asm/cputype.h>
#include <asm/exception.h>

#include "irq-gic-common.h"
#define ITS_FLAGS_CMDQ_NEEDS_FLUSHING		(1ULL << 0)
#define ITS_FLAGS_WORKAROUND_CAVIUM_22375	(1ULL << 1)
#define ITS_FLAGS_WORKAROUND_CAVIUM_23144	(1ULL << 2)

#define RDIST_FLAGS_PROPBASE_NEEDS_FLUSHING	(1 << 0)
/*
 * Collection structure - just an ID, and a redistributor address to
 * ping. We use one per CPU as a bag of interrupts assigned to this
 * CPU.
 */
struct its_collection {
	u64			target_address;
	u16			col_id;
};
/*
 * The ITS structure - contains most of the infrastructure, with the
 * top-level MSI domain, the command queue, the collections, and the
 * list of devices writing to it.
 */
struct its_node {
	raw_spinlock_t		lock;
	struct list_head	entry;
	void __iomem		*base;
	unsigned long		phys_base;
	struct its_cmd_block	*cmd_base;
	struct its_cmd_block	*cmd_write;
	void			*tables[GITS_BASER_NR_REGS];
	struct its_collection	*collections;
	struct list_head	its_device_list;
	u64			flags;
	u32			ite_size;
	int			numa_node;
};
#define ITS_ITT_ALIGN		SZ_256
struct event_lpi_map {
	unsigned long		*lpi_map;
	u16			*col_map;
	irq_hw_number_t		lpi_base;
	int			nr_lpis;
};

/*
 * The ITS view of a device - belongs to an ITS, a collection, owns an
 * interrupt translation table, and a list of interrupts.
 */
struct its_device {
	struct list_head	entry;
	struct its_node		*its;
	struct event_lpi_map	event_map;
	void			*itt;
	u32			nr_ites;
	u32			device_id;
};
static LIST_HEAD(its_nodes);
static DEFINE_SPINLOCK(its_lock);
static struct device_node *gic_root_node;
static struct rdists *gic_rdists;
#define gic_data_rdist()		(raw_cpu_ptr(gic_rdists->rdist))
#define gic_data_rdist_rd_base()	(gic_data_rdist()->rd_base)
static struct its_collection *dev_event_to_col(struct its_device *its_dev,
					       u32 event)
{
	struct its_node *its = its_dev->its;

	return its->collections + its_dev->event_map.col_map[event];
}
/*
 * ITS command descriptors - parameters to be encoded in a command
 * block.
 */
struct its_cmd_desc {
	union {
		struct {
			struct its_device *dev;
			u32 event_id;
		} its_inv_cmd;

		struct {
			struct its_device *dev;
			u32 event_id;
		} its_int_cmd;

		struct {
			struct its_device *dev;
			int valid;
		} its_mapd_cmd;

		struct {
			struct its_collection *col;
			int valid;
		} its_mapc_cmd;

		struct {
			struct its_device *dev;
			u32 phys_id;
			u32 event_id;
		} its_mapvi_cmd;

		struct {
			struct its_device *dev;
			struct its_collection *col;
			u32 event_id;
		} its_movi_cmd;

		struct {
			struct its_device *dev;
			u32 event_id;
		} its_discard_cmd;

		struct {
			struct its_collection *col;
		} its_invall_cmd;
	};
};
/*
 * The ITS command block, which is what the ITS actually parses.
 */
struct its_cmd_block {
	u64	raw_cmd[4];
};

#define ITS_CMD_QUEUE_SZ		SZ_64K
#define ITS_CMD_QUEUE_NR_ENTRIES	(ITS_CMD_QUEUE_SZ / sizeof(struct its_cmd_block))
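/*
 * Each command block is four 64-bit doublewords, i.e. 32 bytes, so
 * the 64kB queue above holds SZ_64K / 32 = 2048 commands.
 */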
typedef struct its_collection *(*its_cmd_builder_t)(struct its_cmd_block *,
						    struct its_cmd_desc *);
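/*
 * A command builder encodes one command into *cmd and returns the
 * collection that a follow-up SYNC should target, or NULL when no
 * SYNC is required (see its_send_single_command() below).
 */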
static void its_encode_cmd(struct its_cmd_block *cmd, u8 cmd_nr)
{
	cmd->raw_cmd[0] &= ~0xffUL;
	cmd->raw_cmd[0] |= cmd_nr;
}
static void its_encode_devid(struct its_cmd_block *cmd, u32 devid)
{
	cmd->raw_cmd[0] &= BIT_ULL(32) - 1;
	cmd->raw_cmd[0] |= ((u64)devid) << 32;
}
static void its_encode_event_id(struct its_cmd_block *cmd, u32 id)
{
	cmd->raw_cmd[1] &= ~0xffffffffUL;
	cmd->raw_cmd[1] |= id;
}
static void its_encode_phys_id(struct its_cmd_block *cmd, u32 phys_id)
{
	cmd->raw_cmd[1] &= 0xffffffffUL;
	cmd->raw_cmd[1] |= ((u64)phys_id) << 32;
}
static void its_encode_size(struct its_cmd_block *cmd, u8 size)
{
	cmd->raw_cmd[1] &= ~0x1fUL;
	cmd->raw_cmd[1] |= size & 0x1f;
}
static void its_encode_itt(struct its_cmd_block *cmd, u64 itt_addr)
{
	cmd->raw_cmd[2] &= ~0xffffffffffffUL;
	cmd->raw_cmd[2] |= itt_addr & 0xffffffffff00UL;
}
static void its_encode_valid(struct its_cmd_block *cmd, int valid)
{
	cmd->raw_cmd[2] &= ~(1UL << 63);
	cmd->raw_cmd[2] |= ((u64)!!valid) << 63;
}
static void its_encode_target(struct its_cmd_block *cmd, u64 target_addr)
{
	cmd->raw_cmd[2] &= ~(0xffffffffUL << 16);
	cmd->raw_cmd[2] |= (target_addr & (0xffffffffUL << 16));
}
static void its_encode_collection(struct its_cmd_block *cmd, u16 col)
{
	cmd->raw_cmd[2] &= ~0xffffUL;
	cmd->raw_cmd[2] |= col;
}
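/*
 * The helpers above poke the three meaningful doublewords of a
 * command: DW0 holds the command number [7:0] and DeviceID [63:32];
 * DW1 holds the EventID [31:0], the physical LPI number [63:32] and,
 * for MAPD, the ITT size [4:0]; DW2 holds the collection ID [15:0],
 * the target/ITT address and the valid bit [63]. Which fields matter
 * depends on the command being built below.
 */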
static inline void its_fixup_cmd(struct its_cmd_block *cmd)
{
	/* Let's fixup BE commands */
	cmd->raw_cmd[0] = cpu_to_le64(cmd->raw_cmd[0]);
	cmd->raw_cmd[1] = cpu_to_le64(cmd->raw_cmd[1]);
	cmd->raw_cmd[2] = cpu_to_le64(cmd->raw_cmd[2]);
	cmd->raw_cmd[3] = cpu_to_le64(cmd->raw_cmd[3]);
}
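/*
 * The ITS always reads commands as little-endian; on a big-endian
 * kernel the cpu_to_le64() calls above byte-swap each doubleword,
 * while on a little-endian kernel they compile away.
 */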
static struct its_collection *its_build_mapd_cmd(struct its_cmd_block *cmd,
						 struct its_cmd_desc *desc)
{
	unsigned long itt_addr;
	u8 size = ilog2(desc->its_mapd_cmd.dev->nr_ites);

	itt_addr = virt_to_phys(desc->its_mapd_cmd.dev->itt);
	itt_addr = ALIGN(itt_addr, ITS_ITT_ALIGN);

	its_encode_cmd(cmd, GITS_CMD_MAPD);
	its_encode_devid(cmd, desc->its_mapd_cmd.dev->device_id);
	its_encode_size(cmd, size - 1);
	its_encode_itt(cmd, itt_addr);
	its_encode_valid(cmd, desc->its_mapd_cmd.valid);

	its_fixup_cmd(cmd);

	return NULL;
}
static struct its_collection *its_build_mapc_cmd(struct its_cmd_block *cmd,
						 struct its_cmd_desc *desc)
{
	its_encode_cmd(cmd, GITS_CMD_MAPC);
	its_encode_collection(cmd, desc->its_mapc_cmd.col->col_id);
	its_encode_target(cmd, desc->its_mapc_cmd.col->target_address);
	its_encode_valid(cmd, desc->its_mapc_cmd.valid);

	its_fixup_cmd(cmd);

	return desc->its_mapc_cmd.col;
}
static struct its_collection *its_build_mapvi_cmd(struct its_cmd_block *cmd,
						  struct its_cmd_desc *desc)
{
	struct its_collection *col;

	col = dev_event_to_col(desc->its_mapvi_cmd.dev,
			       desc->its_mapvi_cmd.event_id);

	its_encode_cmd(cmd, GITS_CMD_MAPVI);
	its_encode_devid(cmd, desc->its_mapvi_cmd.dev->device_id);
	its_encode_event_id(cmd, desc->its_mapvi_cmd.event_id);
	its_encode_phys_id(cmd, desc->its_mapvi_cmd.phys_id);
	its_encode_collection(cmd, col->col_id);

	its_fixup_cmd(cmd);

	return col;
}
static struct its_collection *its_build_movi_cmd(struct its_cmd_block *cmd,
						 struct its_cmd_desc *desc)
{
	struct its_collection *col;

	col = dev_event_to_col(desc->its_movi_cmd.dev,
			       desc->its_movi_cmd.event_id);

	its_encode_cmd(cmd, GITS_CMD_MOVI);
	its_encode_devid(cmd, desc->its_movi_cmd.dev->device_id);
	its_encode_event_id(cmd, desc->its_movi_cmd.event_id);
	its_encode_collection(cmd, desc->its_movi_cmd.col->col_id);

	its_fixup_cmd(cmd);

	return col;
}
static struct its_collection *its_build_discard_cmd(struct its_cmd_block *cmd,
						    struct its_cmd_desc *desc)
{
	struct its_collection *col;

	col = dev_event_to_col(desc->its_discard_cmd.dev,
			       desc->its_discard_cmd.event_id);

	its_encode_cmd(cmd, GITS_CMD_DISCARD);
	its_encode_devid(cmd, desc->its_discard_cmd.dev->device_id);
	its_encode_event_id(cmd, desc->its_discard_cmd.event_id);

	its_fixup_cmd(cmd);

	return col;
}
static struct its_collection *its_build_inv_cmd(struct its_cmd_block *cmd,
						struct its_cmd_desc *desc)
{
	struct its_collection *col;

	col = dev_event_to_col(desc->its_inv_cmd.dev,
			       desc->its_inv_cmd.event_id);

	its_encode_cmd(cmd, GITS_CMD_INV);
	its_encode_devid(cmd, desc->its_inv_cmd.dev->device_id);
	its_encode_event_id(cmd, desc->its_inv_cmd.event_id);

	its_fixup_cmd(cmd);

	return col;
}
static struct its_collection *its_build_invall_cmd(struct its_cmd_block *cmd,
						   struct its_cmd_desc *desc)
{
	its_encode_cmd(cmd, GITS_CMD_INVALL);
	/* Use the descriptor its_send_invall() actually fills */
	its_encode_collection(cmd, desc->its_invall_cmd.col->col_id);

	its_fixup_cmd(cmd);

	return NULL;
}
static u64 its_cmd_ptr_to_offset(struct its_node *its,
				 struct its_cmd_block *ptr)
{
	return (ptr - its->cmd_base) * sizeof(*ptr);
}
static int its_queue_full(struct its_node *its)
{
	int widx;
	int ridx;

	widx = its->cmd_write - its->cmd_base;
	ridx = readl_relaxed(its->base + GITS_CREADR) / sizeof(struct its_cmd_block);

	/* This is incredibly unlikely to happen, unless the ITS locks up. */
	if (((widx + 1) % ITS_CMD_QUEUE_NR_ENTRIES) == ridx)
		return 1;

	return 0;
}
static struct its_cmd_block *its_allocate_entry(struct its_node *its)
{
	struct its_cmd_block *cmd;
	u32 count = 1000000;	/* 1s! */

	while (its_queue_full(its)) {
		count--;
		if (!count) {
			pr_err_ratelimited("ITS queue not draining\n");
			return NULL;
		}
		cpu_relax();
		udelay(1);
	}

	cmd = its->cmd_write++;

	/* Handle queue wrapping */
	if (its->cmd_write == (its->cmd_base + ITS_CMD_QUEUE_NR_ENTRIES))
		its->cmd_write = its->cmd_base;

	return cmd;
}
static struct its_cmd_block *its_post_commands(struct its_node *its)
{
	u64 wr = its_cmd_ptr_to_offset(its, its->cmd_write);

	writel_relaxed(wr, its->base + GITS_CWRITER);

	return its->cmd_write;
}
static void its_flush_cmd(struct its_node *its, struct its_cmd_block *cmd)
{
	/*
	 * Make sure the commands written to memory are observable by
	 * the ITS.
	 */
	if (its->flags & ITS_FLAGS_CMDQ_NEEDS_FLUSHING)
		__flush_dcache_area(cmd, sizeof(*cmd));
	else
		dsb(ishst);
}
static void its_wait_for_range_completion(struct its_node *its,
					  struct its_cmd_block *from,
					  struct its_cmd_block *to)
{
	u64 rd_idx, from_idx, to_idx;
	u32 count = 1000000;	/* 1s! */

	from_idx = its_cmd_ptr_to_offset(its, from);
	to_idx = its_cmd_ptr_to_offset(its, to);

	while (1) {
		rd_idx = readl_relaxed(its->base + GITS_CREADR);
		if (rd_idx >= to_idx || rd_idx < from_idx)
			break;

		count--;
		if (!count) {
			pr_err_ratelimited("ITS queue timeout\n");
			return;
		}
		cpu_relax();
		udelay(1);
	}
}
static void its_send_single_command(struct its_node *its,
				    its_cmd_builder_t builder,
				    struct its_cmd_desc *desc)
{
	struct its_cmd_block *cmd, *sync_cmd, *next_cmd;
	struct its_collection *sync_col;
	unsigned long flags;

	raw_spin_lock_irqsave(&its->lock, flags);

	cmd = its_allocate_entry(its);
	if (!cmd) {		/* We're soooooo screwed... */
		pr_err_ratelimited("ITS can't allocate, dropping command\n");
		raw_spin_unlock_irqrestore(&its->lock, flags);
		return;
	}
	sync_col = builder(cmd, desc);
	its_flush_cmd(its, cmd);

	if (sync_col) {
		sync_cmd = its_allocate_entry(its);
		if (!sync_cmd) {
			pr_err_ratelimited("ITS can't SYNC, skipping\n");
			goto post;
		}
		its_encode_cmd(sync_cmd, GITS_CMD_SYNC);
		its_encode_target(sync_cmd, sync_col->target_address);
		its_fixup_cmd(sync_cmd);
		its_flush_cmd(its, sync_cmd);
	}

post:
	next_cmd = its_post_commands(its);
	raw_spin_unlock_irqrestore(&its->lock, flags);

	its_wait_for_range_completion(its, cmd, next_cmd);
}
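/*
 * Each its_send_*() helper below thus boils down to: build one
 * command in the ring, optionally chase it with a SYNC targeting the
 * relevant redistributor, bump GITS_CWRITER, and spin until
 * GITS_CREADR shows that the ITS has consumed the whole range.
 */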
static void its_send_inv(struct its_device *dev, u32 event_id)
{
	struct its_cmd_desc desc;

	desc.its_inv_cmd.dev = dev;
	desc.its_inv_cmd.event_id = event_id;

	its_send_single_command(dev->its, its_build_inv_cmd, &desc);
}
static void its_send_mapd(struct its_device *dev, int valid)
{
	struct its_cmd_desc desc;

	desc.its_mapd_cmd.dev = dev;
	desc.its_mapd_cmd.valid = !!valid;

	its_send_single_command(dev->its, its_build_mapd_cmd, &desc);
}
static void its_send_mapc(struct its_node *its, struct its_collection *col,
			  int valid)
{
	struct its_cmd_desc desc;

	desc.its_mapc_cmd.col = col;
	desc.its_mapc_cmd.valid = !!valid;

	its_send_single_command(its, its_build_mapc_cmd, &desc);
}
static void its_send_mapvi(struct its_device *dev, u32 irq_id, u32 id)
{
	struct its_cmd_desc desc;

	desc.its_mapvi_cmd.dev = dev;
	desc.its_mapvi_cmd.phys_id = irq_id;
	desc.its_mapvi_cmd.event_id = id;

	its_send_single_command(dev->its, its_build_mapvi_cmd, &desc);
}
static void its_send_movi(struct its_device *dev,
			  struct its_collection *col, u32 id)
{
	struct its_cmd_desc desc;

	desc.its_movi_cmd.dev = dev;
	desc.its_movi_cmd.col = col;
	desc.its_movi_cmd.event_id = id;

	its_send_single_command(dev->its, its_build_movi_cmd, &desc);
}
static void its_send_discard(struct its_device *dev, u32 id)
{
	struct its_cmd_desc desc;

	desc.its_discard_cmd.dev = dev;
	desc.its_discard_cmd.event_id = id;

	its_send_single_command(dev->its, its_build_discard_cmd, &desc);
}
static void its_send_invall(struct its_node *its, struct its_collection *col)
{
	struct its_cmd_desc desc;

	desc.its_invall_cmd.col = col;

	its_send_single_command(its, its_build_invall_cmd, &desc);
}
/*
 * irqchip functions - assumes MSI, mostly.
 */
static inline u32 its_get_event_id(struct irq_data *d)
{
	struct its_device *its_dev = irq_data_get_irq_chip_data(d);
	return d->hwirq - its_dev->event_map.lpi_base;
}
static void lpi_set_config(struct irq_data *d, bool enable)
{
	struct its_device *its_dev = irq_data_get_irq_chip_data(d);
	irq_hw_number_t hwirq = d->hwirq;
	u32 id = its_get_event_id(d);
	u8 *cfg = page_address(gic_rdists->prop_page) + hwirq - 8192;

	if (enable)
		*cfg |= LPI_PROP_ENABLED;
	else
		*cfg &= ~LPI_PROP_ENABLED;

	/*
	 * Make the above write visible to the redistributors.
	 * And yes, we're flushing exactly: One. Single. Byte.
	 */
	if (gic_rdists->flags & RDIST_FLAGS_PROPBASE_NEEDS_FLUSHING)
		__flush_dcache_area(cfg, sizeof(*cfg));
	else
		dsb(ishst);
	its_send_inv(its_dev, id);
}
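/*
 * The property table holds one configuration byte per LPI, indexed by
 * (hwirq - 8192) since the first 8192 interrupt IDs are reserved for
 * SGIs/PPIs/SPIs: LPI 8192 lives at byte 0, LPI 8193 at byte 1, and
 * so on. The trailing INV command makes the ITS re-read that byte.
 */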
static void its_mask_irq(struct irq_data *d)
{
	lpi_set_config(d, false);
}
static void its_unmask_irq(struct irq_data *d)
{
	lpi_set_config(d, true);
}
static int its_set_affinity(struct irq_data *d, const struct cpumask *mask_val,
			    bool force)
{
	unsigned int cpu;
	const struct cpumask *cpu_mask = cpu_online_mask;
	struct its_device *its_dev = irq_data_get_irq_chip_data(d);
	struct its_collection *target_col;
	u32 id = its_get_event_id(d);

	/* An LPI cannot be routed to a redistributor on a foreign node */
	if (its_dev->its->flags & ITS_FLAGS_WORKAROUND_CAVIUM_23144) {
		if (its_dev->its->numa_node >= 0) {
			cpu_mask = cpumask_of_node(its_dev->its->numa_node);
			if (!cpumask_intersects(mask_val, cpu_mask))
				return -EINVAL;
		}
	}

	cpu = cpumask_any_and(mask_val, cpu_mask);

	if (cpu >= nr_cpu_ids)
		return -EINVAL;

	target_col = &its_dev->its->collections[cpu];
	its_send_movi(its_dev, target_col, id);
	its_dev->event_map.col_map[id] = cpu;

	return IRQ_SET_MASK_OK_DONE;
}
static void its_irq_compose_msi_msg(struct irq_data *d, struct msi_msg *msg)
{
	struct its_device *its_dev = irq_data_get_irq_chip_data(d);
	struct its_node *its;
	u64 addr;

	its = its_dev->its;
	addr = its->phys_base + GITS_TRANSLATER;

	msg->address_lo		= addr & ((1UL << 32) - 1);
	msg->address_hi		= addr >> 32;
	msg->data		= its_get_event_id(d);
}
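/*
 * In other words, an MSI from this device is a 32-bit write of the
 * per-device event ID to the GITS_TRANSLATER doorbell; the ITS then
 * combines the bus-provided DeviceID with this EventID to look up the
 * physical LPI in the device's ITT.
 */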
static struct irq_chip its_irq_chip = {
	.name			= "ITS",
	.irq_mask		= its_mask_irq,
	.irq_unmask		= its_unmask_irq,
	.irq_eoi		= irq_chip_eoi_parent,
	.irq_set_affinity	= its_set_affinity,
	.irq_compose_msi_msg	= its_irq_compose_msi_msg,
};
/*
 * How we allocate LPIs:
 *
 * The GIC has id_bits bits for interrupt identifiers. From there, we
 * must subtract 8192 which are reserved for SGIs/PPIs/SPIs. Then, as
 * we allocate LPIs in chunks of 32, we can shift the whole thing by 5
 * bits (IRQS_PER_CHUNK_SHIFT).
 *
 * This gives us (((1UL << id_bits) - 8192) >> 5) possible allocations.
 */
#define IRQS_PER_CHUNK_SHIFT	5
#define IRQS_PER_CHUNK		(1 << IRQS_PER_CHUNK_SHIFT)
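/*
 * Worked example: with id_bits = 16 the GIC can name 65536
 * interrupts; subtracting the 8192 reserved IDs leaves 57344 LPIs,
 * i.e. 57344 >> 5 = 1792 allocatable chunks of 32 LPIs each.
 */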
static unsigned long *lpi_bitmap;
static u32 lpi_chunks;
static DEFINE_SPINLOCK(lpi_lock);
static int its_lpi_to_chunk(int lpi)
{
	return (lpi - 8192) >> IRQS_PER_CHUNK_SHIFT;
}
static int its_chunk_to_lpi(int chunk)
{
	return (chunk << IRQS_PER_CHUNK_SHIFT) + 8192;
}
static int its_lpi_init(u32 id_bits)
{
	lpi_chunks = its_lpi_to_chunk(1UL << id_bits);

	lpi_bitmap = kzalloc(BITS_TO_LONGS(lpi_chunks) * sizeof(long),
			     GFP_KERNEL);
	if (!lpi_bitmap) {
		lpi_chunks = 0;
		return -ENOMEM;
	}

	pr_info("ITS: Allocated %d chunks for LPIs\n", (int)lpi_chunks);
	return 0;
}
static unsigned long *its_lpi_alloc_chunks(int nr_irqs, int *base, int *nr_ids)
{
	unsigned long *bitmap = NULL;
	int chunk_id;
	int nr_chunks;
	int i;

	nr_chunks = DIV_ROUND_UP(nr_irqs, IRQS_PER_CHUNK);

	spin_lock(&lpi_lock);

	do {
		chunk_id = bitmap_find_next_zero_area(lpi_bitmap, lpi_chunks,
						      0, nr_chunks, 0);
		if (chunk_id < lpi_chunks)
			break;

		nr_chunks--;
	} while (nr_chunks > 0);

	if (!nr_chunks)
		goto out;

	bitmap = kzalloc(BITS_TO_LONGS(nr_chunks * IRQS_PER_CHUNK) * sizeof(long),
			 GFP_ATOMIC);
	if (!bitmap)
		goto out;

	for (i = 0; i < nr_chunks; i++)
		set_bit(chunk_id + i, lpi_bitmap);

	*base = its_chunk_to_lpi(chunk_id);
	*nr_ids = nr_chunks * IRQS_PER_CHUNK;

out:
	spin_unlock(&lpi_lock);

	return bitmap;
}
static void its_lpi_free(struct event_lpi_map *map)
{
	int base = map->lpi_base;
	int nr_ids = map->nr_lpis;
	int lpi;

	spin_lock(&lpi_lock);

	for (lpi = base; lpi < (base + nr_ids); lpi += IRQS_PER_CHUNK) {
		int chunk = its_lpi_to_chunk(lpi);
		BUG_ON(chunk > lpi_chunks);
		if (test_bit(chunk, lpi_bitmap)) {
			clear_bit(chunk, lpi_bitmap);
		} else {
			pr_err("Bad LPI chunk %d\n", chunk);
		}
	}

	spin_unlock(&lpi_lock);

	kfree(map->lpi_map);
	kfree(map->col_map);
}
/*
 * We allocate 64kB for PROPBASE. That gives us at most 64K LPIs to
 * deal with (one configuration byte per interrupt). PENDBASE has to
 * be 64kB aligned (one bit per LPI, plus 8192 bits for SPI/PPI/SGI).
 */
#define LPI_PROPBASE_SZ		SZ_64K
#define LPI_PENDBASE_SZ		(LPI_PROPBASE_SZ / 8 + SZ_1K)

/*
 * This is how many bits of ID we need, including the useless ones.
 */
#define LPI_NRBITS		ilog2(LPI_PROPBASE_SZ + SZ_8K)

#define LPI_PROP_DEFAULT_PRIO	0xa0
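/*
 * Sizing arithmetic: a 64kB property table covers 65536 LPIs (one
 * byte each), so the pending table needs one bit for each of the
 * 65536 + 8192 possible IDs: 73728 bits = 9216 bytes = 64K/8 + 1K.
 * Likewise LPI_NRBITS = ilog2(64K + 8K) = 16 ID bits.
 */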
static int __init its_alloc_lpi_tables(void)
{
	phys_addr_t paddr;

	gic_rdists->prop_page = alloc_pages(GFP_NOWAIT,
					    get_order(LPI_PROPBASE_SZ));
	if (!gic_rdists->prop_page) {
		pr_err("Failed to allocate PROPBASE\n");
		return -ENOMEM;
	}

	paddr = page_to_phys(gic_rdists->prop_page);
	pr_info("GIC: using LPI property table @%pa\n", &paddr);

	/* Priority 0xa0, Group-1, disabled */
	memset(page_address(gic_rdists->prop_page),
	       LPI_PROP_DEFAULT_PRIO | LPI_PROP_GROUP1,
	       LPI_PROPBASE_SZ);

	/* Make sure the GIC will observe the written configuration */
	__flush_dcache_area(page_address(gic_rdists->prop_page), LPI_PROPBASE_SZ);

	return 0;
}
static const char *its_base_type_string[] = {
	[GITS_BASER_TYPE_DEVICE]	= "Devices",
	[GITS_BASER_TYPE_VCPU]		= "Virtual CPUs",
	[GITS_BASER_TYPE_CPU]		= "Physical CPUs",
	[GITS_BASER_TYPE_COLLECTION]	= "Interrupt Collections",
	[GITS_BASER_TYPE_RESERVED5]	= "Reserved (5)",
	[GITS_BASER_TYPE_RESERVED6]	= "Reserved (6)",
	[GITS_BASER_TYPE_RESERVED7]	= "Reserved (7)",
};
static void its_free_tables(struct its_node *its)
{
	int i;

	for (i = 0; i < GITS_BASER_NR_REGS; i++) {
		if (its->tables[i]) {
			free_page((unsigned long)its->tables[i]);
			its->tables[i] = NULL;
		}
	}
}
static int its_alloc_tables(const char *node_name, struct its_node *its)
{
	int err;
	int i;
	int psz = SZ_64K;
	u64 shr = GITS_BASER_InnerShareable;
	u64 cache;
	u64 typer;
	u32 ids;

	if (its->flags & ITS_FLAGS_WORKAROUND_CAVIUM_22375) {
		/*
		 * erratum 22375: only alloc 8MB table size
		 * erratum 24313: ignore memory access type
		 */
		cache	= 0;
		ids	= 0x14;			/* 20 bits, 8MB */
	} else {
		cache	= GITS_BASER_WaWb;
		typer	= readq_relaxed(its->base + GITS_TYPER);
		ids	= GITS_TYPER_DEVBITS(typer);
	}

	for (i = 0; i < GITS_BASER_NR_REGS; i++) {
		u64 val = readq_relaxed(its->base + GITS_BASER + i * 8);
		u64 type = GITS_BASER_TYPE(val);
		u64 entry_size = GITS_BASER_ENTRY_SIZE(val);
		int order = get_order(psz);
		int alloc_pages;
		u64 alloc_size;
		u64 tmp;
		void *base;

		if (type == GITS_BASER_TYPE_NONE)
			continue;

		/*
		 * Allocate as many entries as required to fit the
		 * range of device IDs that the ITS can grok... The ID
		 * space being incredibly sparse, this results in a
		 * massive waste of memory.
		 *
		 * For other tables, only allocate a single page.
		 */
		if (type == GITS_BASER_TYPE_DEVICE) {
			/*
			 * 'order' was initialized earlier to the default page
			 * granule of the ITS. We can't have an allocation
			 * smaller than that. If the requested allocation
			 * is smaller, round up to the default page granule.
			 */
			order = max(get_order((1UL << ids) * entry_size),
				    order);
			if (order >= MAX_ORDER) {
				order = MAX_ORDER - 1;
				pr_warn("%s: Device Table too large, reduce its page order to %u\n",
					node_name, order);
			}
		}

retry_alloc_baser:
		alloc_size = (1 << order) * PAGE_SIZE;
		alloc_pages = (alloc_size / psz);
		if (alloc_pages > GITS_BASER_PAGES_MAX) {
			alloc_pages = GITS_BASER_PAGES_MAX;
			order = get_order(GITS_BASER_PAGES_MAX * psz);
			pr_warn("%s: Device Table too large, reduce its page order to %u (%u pages)\n",
				node_name, order, alloc_pages);
		}

		base = (void *)__get_free_pages(GFP_KERNEL | __GFP_ZERO, order);
		if (!base) {
			err = -ENOMEM;
			goto out_free;
		}

		its->tables[i] = base;

retry_baser:
		val = (virt_to_phys(base)				 |
		       (type << GITS_BASER_TYPE_SHIFT)			 |
		       ((entry_size - 1) << GITS_BASER_ENTRY_SIZE_SHIFT) |
		       cache						 |
		       shr						 |
		       GITS_BASER_VALID);

		switch (psz) {
		case SZ_4K:
			val |= GITS_BASER_PAGE_SIZE_4K;
			break;
		case SZ_16K:
			val |= GITS_BASER_PAGE_SIZE_16K;
			break;
		case SZ_64K:
			val |= GITS_BASER_PAGE_SIZE_64K;
			break;
		}

		val |= alloc_pages - 1;

		writeq_relaxed(val, its->base + GITS_BASER + i * 8);
		tmp = readq_relaxed(its->base + GITS_BASER + i * 8);

		if ((val ^ tmp) & GITS_BASER_SHAREABILITY_MASK) {
			/*
			 * Shareability didn't stick. Just use
			 * whatever the read reported, which is likely
			 * to be the only thing this redistributor
			 * supports. If that's zero, make it
			 * non-cacheable as well.
			 */
			shr = tmp & GITS_BASER_SHAREABILITY_MASK;
			if (!shr) {
				cache = GITS_BASER_nC;
				__flush_dcache_area(base, alloc_size);
			}
			goto retry_baser;
		}

		if ((val ^ tmp) & GITS_BASER_PAGE_SIZE_MASK) {
			/*
			 * Page size didn't stick. Let's try a smaller
			 * size and retry. If we reach 4K, then
			 * something is horribly wrong...
			 */
			free_pages((unsigned long)base, order);
			its->tables[i] = NULL;

			switch (psz) {
			case SZ_16K:
				psz = SZ_4K;
				goto retry_alloc_baser;
			case SZ_64K:
				psz = SZ_16K;
				goto retry_alloc_baser;
			}
		}

		if (val != tmp) {
			pr_err("ITS: %s: GITS_BASER%d doesn't stick: %lx %lx\n",
			       node_name, i,
			       (unsigned long) val, (unsigned long) tmp);
			err = -ENXIO;
			goto out_free;
		}

		pr_info("ITS: allocated %d %s @%lx (psz %dK, shr %d)\n",
			(int)(alloc_size / entry_size),
			its_base_type_string[type],
			(unsigned long)virt_to_phys(base),
			psz / SZ_1K, (int)shr >> GITS_BASER_SHAREABILITY_SHIFT);
	}

	return 0;

out_free:
	its_free_tables(its);

	return err;
}
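/*
 * Worked example of the device table sizing above: with the Cavium
 * erratum in effect, ids = 20 together with a typical 8-byte entry
 * size gives (1 << 20) * 8 = 8MB, which is where the "20 bits, 8MB"
 * comment comes from.
 */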
static int its_alloc_collections(struct its_node *its)
{
	its->collections = kzalloc(nr_cpu_ids * sizeof(*its->collections),
				   GFP_KERNEL);
	if (!its->collections)
		return -ENOMEM;

	return 0;
}
static void its_cpu_init_lpis(void)
{
	void __iomem *rbase = gic_data_rdist_rd_base();
	struct page *pend_page;
	u64 val, tmp;

	/* If we didn't allocate the pending table yet, do it now */
	pend_page = gic_data_rdist()->pend_page;
	if (!pend_page) {
		phys_addr_t paddr;
		/*
		 * The pending pages have to be at least 64kB aligned,
		 * hence the 'max(LPI_PENDBASE_SZ, SZ_64K)' below.
		 */
		pend_page = alloc_pages(GFP_NOWAIT | __GFP_ZERO,
					get_order(max(LPI_PENDBASE_SZ, SZ_64K)));
		if (!pend_page) {
			pr_err("Failed to allocate PENDBASE for CPU%d\n",
			       smp_processor_id());
			return;
		}

		/* Make sure the GIC will observe the zero-ed page */
		__flush_dcache_area(page_address(pend_page), LPI_PENDBASE_SZ);

		paddr = page_to_phys(pend_page);
		pr_info("CPU%d: using LPI pending table @%pa\n",
			smp_processor_id(), &paddr);
		gic_data_rdist()->pend_page = pend_page;
	}

	/* Disable LPIs */
	val = readl_relaxed(rbase + GICR_CTLR);
	val &= ~GICR_CTLR_ENABLE_LPIS;
	writel_relaxed(val, rbase + GICR_CTLR);

	/*
	 * Make sure any change to the table is observable by the GIC.
	 */
	dsb(sy);

	/* set PROPBASE */
	val = (page_to_phys(gic_rdists->prop_page) |
	       GICR_PROPBASER_InnerShareable |
	       GICR_PROPBASER_WaWb |
	       ((LPI_NRBITS - 1) & GICR_PROPBASER_IDBITS_MASK));

	writeq_relaxed(val, rbase + GICR_PROPBASER);
	tmp = readq_relaxed(rbase + GICR_PROPBASER);

	if ((tmp ^ val) & GICR_PROPBASER_SHAREABILITY_MASK) {
		if (!(tmp & GICR_PROPBASER_SHAREABILITY_MASK)) {
			/*
			 * The HW reports non-shareable, we must
			 * remove the cacheability attributes as
			 * well.
			 */
			val &= ~(GICR_PROPBASER_SHAREABILITY_MASK |
				 GICR_PROPBASER_CACHEABILITY_MASK);
			val |= GICR_PROPBASER_nC;
			writeq_relaxed(val, rbase + GICR_PROPBASER);
		}
		pr_info_once("GIC: using cache flushing for LPI property table\n");
		gic_rdists->flags |= RDIST_FLAGS_PROPBASE_NEEDS_FLUSHING;
	}

	/* set PENDBASE */
	val = (page_to_phys(pend_page) |
	       GICR_PENDBASER_InnerShareable |
	       GICR_PENDBASER_WaWb);

	writeq_relaxed(val, rbase + GICR_PENDBASER);
	tmp = readq_relaxed(rbase + GICR_PENDBASER);

	if (!(tmp & GICR_PENDBASER_SHAREABILITY_MASK)) {
		/*
		 * The HW reports non-shareable, we must remove the
		 * cacheability attributes as well.
		 */
		val &= ~(GICR_PENDBASER_SHAREABILITY_MASK |
			 GICR_PENDBASER_CACHEABILITY_MASK);
		val |= GICR_PENDBASER_nC;
		writeq_relaxed(val, rbase + GICR_PENDBASER);
	}

	/* Enable LPIs */
	val = readl_relaxed(rbase + GICR_CTLR);
	val |= GICR_CTLR_ENABLE_LPIS;
	writel_relaxed(val, rbase + GICR_CTLR);

	/* Make sure the GIC has seen the above */
	dsb(sy);
}
static void its_cpu_init_collection(void)
{
	struct its_node *its;
	int cpu;

	spin_lock(&its_lock);
	cpu = smp_processor_id();

	list_for_each_entry(its, &its_nodes, entry) {
		u64 target;

		/* avoid cross-node collections and mappings */
		if (its->flags & ITS_FLAGS_WORKAROUND_CAVIUM_23144) {
			struct device_node *cpu_node;

			cpu_node = of_get_cpu_node(cpu, NULL);
			if (its->numa_node != NUMA_NO_NODE &&
			    its->numa_node != of_node_to_nid(cpu_node))
				continue;
		}

		/*
		 * We now have to bind each collection to its target
		 * redistributor.
		 */
		if (readq_relaxed(its->base + GITS_TYPER) & GITS_TYPER_PTA) {
			/*
			 * This ITS wants the physical address of the
			 * redistributor.
			 */
			target = gic_data_rdist()->phys_base;
		} else {
			/*
			 * This ITS wants a linear CPU number.
			 */
			target = readq_relaxed(gic_data_rdist_rd_base() + GICR_TYPER);
			target = GICR_TYPER_CPU_NUMBER(target) << 16;
		}

		/* Perform collection mapping */
		its->collections[cpu].target_address = target;
		its->collections[cpu].col_id = cpu;

		its_send_mapc(its, &its->collections[cpu], 1);
		its_send_invall(its, &its->collections[cpu]);
	}

	spin_unlock(&its_lock);
}
static struct its_device *its_find_device(struct its_node *its, u32 dev_id)
{
	struct its_device *its_dev = NULL, *tmp;
	unsigned long flags;

	raw_spin_lock_irqsave(&its->lock, flags);

	list_for_each_entry(tmp, &its->its_device_list, entry) {
		if (tmp->device_id == dev_id) {
			its_dev = tmp;
			break;
		}
	}

	raw_spin_unlock_irqrestore(&its->lock, flags);

	return its_dev;
}
static struct its_device *its_create_device(struct its_node *its, u32 dev_id,
					    int nvecs)
{
	struct its_device *dev;
	unsigned long *lpi_map;
	unsigned long flags;
	u16 *col_map = NULL;
	void *itt;
	int lpi_base;
	int nr_lpis;
	int nr_ites;
	int sz;

	dev = kzalloc(sizeof(*dev), GFP_KERNEL);
	/*
	 * At least one bit of EventID is being used, hence a minimum
	 * of two entries. No, the architecture doesn't let you
	 * express an ITT with a single entry.
	 */
	nr_ites = max(2UL, roundup_pow_of_two(nvecs));
	sz = nr_ites * its->ite_size;
	sz = max(sz, ITS_ITT_ALIGN) + ITS_ITT_ALIGN - 1;
	itt = kzalloc(sz, GFP_KERNEL);
	lpi_map = its_lpi_alloc_chunks(nvecs, &lpi_base, &nr_lpis);
	if (lpi_map)
		col_map = kzalloc(sizeof(*col_map) * nr_lpis, GFP_KERNEL);

	if (!dev || !itt || !lpi_map || !col_map) {
		kfree(dev);
		kfree(itt);
		kfree(lpi_map);
		kfree(col_map);
		return NULL;
	}

	__flush_dcache_area(itt, sz);

	dev->its = its;
	dev->itt = itt;
	dev->nr_ites = nr_ites;
	dev->event_map.lpi_map = lpi_map;
	dev->event_map.col_map = col_map;
	dev->event_map.lpi_base = lpi_base;
	dev->event_map.nr_lpis = nr_lpis;
	dev->device_id = dev_id;
	INIT_LIST_HEAD(&dev->entry);

	raw_spin_lock_irqsave(&its->lock, flags);
	list_add(&dev->entry, &its->its_device_list);
	raw_spin_unlock_irqrestore(&its->lock, flags);

	/* Map device to its ITT */
	its_send_mapd(dev, 1);

	return dev;
}
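/*
 * ITT sizing example: asking for nvecs = 3 vectors rounds up to
 * nr_ites = 4 entries; with a typical 8-byte ITE that is 32 bytes,
 * which then gets padded so that the 256-byte-aligned address handed
 * to MAPD still lies entirely within the allocation.
 */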
static void its_free_device(struct its_device *its_dev)
{
	unsigned long flags;

	raw_spin_lock_irqsave(&its_dev->its->lock, flags);
	list_del(&its_dev->entry);
	raw_spin_unlock_irqrestore(&its_dev->its->lock, flags);
	kfree(its_dev->itt);
	kfree(its_dev);
}
static int its_alloc_device_irq(struct its_device *dev, irq_hw_number_t *hwirq)
{
	int idx;

	idx = find_first_zero_bit(dev->event_map.lpi_map,
				  dev->event_map.nr_lpis);
	if (idx == dev->event_map.nr_lpis)
		return -ENOSPC;

	*hwirq = dev->event_map.lpi_base + idx;
	set_bit(idx, dev->event_map.lpi_map);

	return 0;
}
static int its_msi_prepare(struct irq_domain *domain, struct device *dev,
			   int nvec, msi_alloc_info_t *info)
{
	struct its_node *its;
	struct its_device *its_dev;
	struct msi_domain_info *msi_info;
	u32 dev_id;

	/*
	 * We ignore "dev" entirely, and rely on the dev_id that has
	 * been passed via the scratchpad. This limits this domain's
	 * usefulness to upper layers that definitely know that they
	 * are built on top of the ITS.
	 */
	dev_id = info->scratchpad[0].ul;

	msi_info = msi_get_domain_info(domain);
	its = msi_info->data;

	its_dev = its_find_device(its, dev_id);
	if (its_dev) {
		/*
		 * We already have seen this ID, probably through
		 * another alias (PCI bridge of some sort). No need to
		 * create the device.
		 */
		pr_debug("Reusing ITT for devID %x\n", dev_id);
		goto out;
	}

	its_dev = its_create_device(its, dev_id, nvec);
	if (!its_dev)
		return -ENOMEM;

	pr_debug("ITT %d entries, %d bits\n", nvec, ilog2(nvec));
out:
	info->scratchpad[0].ptr = its_dev;
	return 0;
}
static struct msi_domain_ops its_msi_domain_ops = {
	.msi_prepare	= its_msi_prepare,
};
static int its_irq_gic_domain_alloc(struct irq_domain *domain,
				    unsigned int virq,
				    irq_hw_number_t hwirq)
{
	struct irq_fwspec fwspec;

	if (irq_domain_get_of_node(domain->parent)) {
		fwspec.fwnode = domain->parent->fwnode;
		fwspec.param_count = 3;
		fwspec.param[0] = GIC_IRQ_TYPE_LPI;
		fwspec.param[1] = hwirq;
		fwspec.param[2] = IRQ_TYPE_EDGE_RISING;
	} else {
		return -EINVAL;
	}

	return irq_domain_alloc_irqs_parent(domain, virq, 1, &fwspec);
}
static int its_irq_domain_alloc(struct irq_domain *domain, unsigned int virq,
				unsigned int nr_irqs, void *args)
{
	msi_alloc_info_t *info = args;
	struct its_device *its_dev = info->scratchpad[0].ptr;
	irq_hw_number_t hwirq;
	int err;
	int i;

	for (i = 0; i < nr_irqs; i++) {
		err = its_alloc_device_irq(its_dev, &hwirq);
		if (err)
			return err;

		err = its_irq_gic_domain_alloc(domain, virq + i, hwirq);
		if (err)
			return err;

		irq_domain_set_hwirq_and_chip(domain, virq + i,
					      hwirq, &its_irq_chip, its_dev);
		pr_debug("ID:%d pID:%d vID:%d\n",
			 (int)(hwirq - its_dev->event_map.lpi_base),
			 (int) hwirq, virq + i);
	}

	return 0;
}
static void its_irq_domain_activate(struct irq_domain *domain,
				    struct irq_data *d)
{
	struct its_device *its_dev = irq_data_get_irq_chip_data(d);
	u32 event = its_get_event_id(d);
	const struct cpumask *cpu_mask = cpu_online_mask;

	/* get the cpu_mask of local node */
	if (its_dev->its->numa_node >= 0)
		cpu_mask = cpumask_of_node(its_dev->its->numa_node);

	/* Bind the LPI to the first possible CPU */
	its_dev->event_map.col_map[event] = cpumask_first(cpu_mask);

	/* Map the GIC IRQ and event to the device */
	its_send_mapvi(its_dev, d->hwirq, event);
}
static void its_irq_domain_deactivate(struct irq_domain *domain,
				      struct irq_data *d)
{
	struct its_device *its_dev = irq_data_get_irq_chip_data(d);
	u32 event = its_get_event_id(d);

	/* Stop the delivery of interrupts */
	its_send_discard(its_dev, event);
}
static void its_irq_domain_free(struct irq_domain *domain, unsigned int virq,
				unsigned int nr_irqs)
{
	struct irq_data *d = irq_domain_get_irq_data(domain, virq);
	struct its_device *its_dev = irq_data_get_irq_chip_data(d);
	int i;

	for (i = 0; i < nr_irqs; i++) {
		struct irq_data *data = irq_domain_get_irq_data(domain,
								virq + i);
		u32 event = its_get_event_id(data);

		/* Mark interrupt index as unused */
		clear_bit(event, its_dev->event_map.lpi_map);

		/* Nuke the entry in the domain */
		irq_domain_reset_irq_data(data);
	}

	/* If all interrupts have been freed, start mopping the floor */
	if (bitmap_empty(its_dev->event_map.lpi_map,
			 its_dev->event_map.nr_lpis)) {
		its_lpi_free(&its_dev->event_map);

		/* Unmap device/itt */
		its_send_mapd(its_dev, 0);
		its_free_device(its_dev);
	}

	irq_domain_free_irqs_parent(domain, virq, nr_irqs);
}
static const struct irq_domain_ops its_domain_ops = {
	.alloc			= its_irq_domain_alloc,
	.free			= its_irq_domain_free,
	.activate		= its_irq_domain_activate,
	.deactivate		= its_irq_domain_deactivate,
};
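/*
 * Lifecycle summary: .alloc picks a free event and wires up the Linux
 * IRQ, .activate issues MAPVI to make the event deliverable,
 * .deactivate issues DISCARD to stop delivery, and .free returns the
 * event; once the last event of a device is gone, the device itself
 * is unmapped with an invalidating MAPD.
 */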
static int its_force_quiescent(void __iomem *base)
{
	u32 count = 1000000;	/* 1s */
	u32 val;

	val = readl_relaxed(base + GITS_CTLR);
	if (val & GITS_CTLR_QUIESCENT)
		return 0;

	/* Disable the generation of all interrupts to this ITS */
	val &= ~GITS_CTLR_ENABLE;
	writel_relaxed(val, base + GITS_CTLR);

	/* Poll GITS_CTLR and wait until ITS becomes quiescent */
	while (1) {
		val = readl_relaxed(base + GITS_CTLR);
		if (val & GITS_CTLR_QUIESCENT)
			return 0;

		count--;
		if (!count)
			return -EBUSY;

		cpu_relax();
		udelay(1);
	}
}
static void __maybe_unused its_enable_quirk_cavium_22375(void *data)
{
	struct its_node *its = data;

	its->flags |= ITS_FLAGS_WORKAROUND_CAVIUM_22375;
}
static void __maybe_unused its_enable_quirk_cavium_23144(void *data)
{
	struct its_node *its = data;

	its->flags |= ITS_FLAGS_WORKAROUND_CAVIUM_23144;
}
static const struct gic_quirk its_quirks[] = {
#ifdef CONFIG_CAVIUM_ERRATUM_22375
	{
		.desc	= "ITS: Cavium errata 22375, 24313",
		.iidr	= 0xa100034c,	/* ThunderX pass 1.x */
		.mask	= 0xffff0fff,
		.init	= its_enable_quirk_cavium_22375,
	},
#endif
#ifdef CONFIG_CAVIUM_ERRATUM_23144
	{
		.desc	= "ITS: Cavium erratum 23144",
		.iidr	= 0xa100034c,	/* ThunderX pass 1.x */
		.mask	= 0xffff0fff,
		.init	= its_enable_quirk_cavium_23144,
	},
#endif
	{
	}
};
static void its_enable_quirks(struct its_node *its)
{
	u32 iidr = readl_relaxed(its->base + GITS_IIDR);

	gic_enable_quirks(iidr, its_quirks, its);
}
static int its_probe(struct device_node *node, struct irq_domain *parent)
{
	struct resource res;
	struct its_node *its;
	void __iomem *its_base;
	struct irq_domain *inner_domain;
	u32 val;
	u64 baser, tmp;
	int err;

	err = of_address_to_resource(node, 0, &res);
	if (err) {
		pr_warn("%s: no regs?\n", node->full_name);
		return -ENXIO;
	}

	its_base = ioremap(res.start, resource_size(&res));
	if (!its_base) {
		pr_warn("%s: unable to map registers\n", node->full_name);
		return -ENOMEM;
	}

	val = readl_relaxed(its_base + GITS_PIDR2) & GIC_PIDR2_ARCH_MASK;
	if (val != 0x30 && val != 0x40) {
		pr_warn("%s: no ITS detected, giving up\n", node->full_name);
		err = -ENODEV;
		goto out_unmap;
	}

	err = its_force_quiescent(its_base);
	if (err) {
		pr_warn("%s: failed to quiesce, giving up\n",
			node->full_name);
		goto out_unmap;
	}

	pr_info("ITS: %s\n", node->full_name);

	its = kzalloc(sizeof(*its), GFP_KERNEL);
	if (!its) {
		err = -ENOMEM;
		goto out_unmap;
	}

	raw_spin_lock_init(&its->lock);
	INIT_LIST_HEAD(&its->entry);
	INIT_LIST_HEAD(&its->its_device_list);
	its->base = its_base;
	its->phys_base = res.start;
	its->ite_size = ((readl_relaxed(its_base + GITS_TYPER) >> 4) & 0xf) + 1;
	its->numa_node = of_node_to_nid(node);

	its->cmd_base = kzalloc(ITS_CMD_QUEUE_SZ, GFP_KERNEL);
	if (!its->cmd_base) {
		err = -ENOMEM;
		goto out_free_its;
	}
	its->cmd_write = its->cmd_base;

	its_enable_quirks(its);

	err = its_alloc_tables(node->full_name, its);
	if (err)
		goto out_free_cmd;

	err = its_alloc_collections(its);
	if (err)
		goto out_free_tables;

	baser = (virt_to_phys(its->cmd_base)	|
		 GITS_CBASER_WaWb		|
		 GITS_CBASER_InnerShareable	|
		 (ITS_CMD_QUEUE_SZ / SZ_4K - 1)	|
		 GITS_CBASER_VALID);

	writeq_relaxed(baser, its->base + GITS_CBASER);
	tmp = readq_relaxed(its->base + GITS_CBASER);

	if ((tmp ^ baser) & GITS_CBASER_SHAREABILITY_MASK) {
		if (!(tmp & GITS_CBASER_SHAREABILITY_MASK)) {
			/*
			 * The HW reports non-shareable, we must
			 * remove the cacheability attributes as
			 * well.
			 */
			baser &= ~(GITS_CBASER_SHAREABILITY_MASK |
				   GITS_CBASER_CACHEABILITY_MASK);
			baser |= GITS_CBASER_nC;
			writeq_relaxed(baser, its->base + GITS_CBASER);
		}
		pr_info("ITS: using cache flushing for cmd queue\n");
		its->flags |= ITS_FLAGS_CMDQ_NEEDS_FLUSHING;
	}

	writeq_relaxed(0, its->base + GITS_CWRITER);
	writel_relaxed(GITS_CTLR_ENABLE, its->base + GITS_CTLR);

	if (of_property_read_bool(node, "msi-controller")) {
		struct msi_domain_info *info;

		info = kzalloc(sizeof(*info), GFP_KERNEL);
		if (!info) {
			err = -ENOMEM;
			goto out_free_tables;
		}

		inner_domain = irq_domain_add_tree(node, &its_domain_ops, its);
		if (!inner_domain) {
			err = -ENOMEM;
			kfree(info);
			goto out_free_tables;
		}

		inner_domain->parent = parent;
		inner_domain->bus_token = DOMAIN_BUS_NEXUS;
		info->ops = &its_msi_domain_ops;
		info->data = its;
		inner_domain->host_data = info;
	}

	spin_lock(&its_lock);
	list_add(&its->entry, &its_nodes);
	spin_unlock(&its_lock);

	return 0;

out_free_tables:
	its_free_tables(its);
out_free_cmd:
	kfree(its->cmd_base);
out_free_its:
	kfree(its);
out_unmap:
	iounmap(its_base);
	pr_err("ITS: failed probing %s (%d)\n", node->full_name, err);
	return err;
}
static bool gic_rdists_supports_plpis(void)
{
	return !!(readl_relaxed(gic_data_rdist_rd_base() + GICR_TYPER) & GICR_TYPER_PLPIS);
}
int its_cpu_init(void)
{
	if (!list_empty(&its_nodes)) {
		if (!gic_rdists_supports_plpis()) {
			pr_info("CPU%d: LPIs not supported\n", smp_processor_id());
			return -ENXIO;
		}
		its_cpu_init_lpis();
		its_cpu_init_collection();
	}

	return 0;
}
static struct of_device_id its_device_id[] = {
	{	.compatible	= "arm,gic-v3-its",	},
	{},
};
int its_init(struct device_node *node, struct rdists *rdists,
	     struct irq_domain *parent_domain)
{
	struct device_node *np;

	for (np = of_find_matching_node(node, its_device_id); np;
	     np = of_find_matching_node(np, its_device_id)) {
		its_probe(np, parent_domain);
	}

	if (list_empty(&its_nodes)) {
		pr_warn("ITS: No ITS available, not enabling LPIs\n");
		return -ENXIO;
	}

	gic_rdists = rdists;
	gic_root_node = node;

	its_alloc_lpi_tables();
	its_lpi_init(rdists->id_bits);

	return 0;
}