/*
 * Copyright 2011 IBM Corporation.
 *
 *  This program is free software; you can redistribute it and/or
 *  modify it under the terms of the GNU General Public License
 *  as published by the Free Software Foundation; either version
 *  2 of the License, or (at your option) any later version.
 *
 */

#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/irq.h>
#include <linux/smp.h>
#include <linux/interrupt.h>
#include <linux/init.h>
#include <linux/cpu.h>
#include <linux/of.h>
#include <linux/spinlock.h>
#include <linux/module.h>

#include <asm/prom.h>
#include <asm/io.h>
#include <asm/smp.h>
#include <asm/irq.h>
#include <asm/errno.h>
#include <asm/xics.h>
#include <asm/kvm_ppc.h>
#include <asm/dbell.h>

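/*
 * Per-CPU MMIO layout of the ICP (Interrupt Current Processor)
 * presentation registers, per the usual XICS/PAPR layout: XIRR poll
 * at offset 0x0, XIRR at 0x4, and the IPI trigger register ("qirr"
 * here, the MFRR in XICS terms) at 0xc.  The byte views let us
 * access single bytes such as the CPPR (byte 0 of the XIRR).  The
 * link words are not used by this driver.
 */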
struct icp_ipl {
	union {
		u32 word;
		u8 bytes[4];
	} xirr_poll;
	union {
		u32 word;
		u8 bytes[4];
	} xirr;
	u32 dummy;
	union {
		u32 word;
		u8 bytes[4];
	} qirr;
	u32 link_a;
	u32 link_b;
	u32 link_c;
};

static struct icp_ipl __iomem *icp_native_regs[NR_CPUS];

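/*
 * A load from the XIRR accepts the highest-priority pending
 * interrupt: the hardware returns the source number in the low 24
 * bits and the CPPR in the top byte.  Interrupts already latched by
 * KVM on the host's behalf are consumed first.
 */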
static inline unsigned int icp_native_get_xirr(void)
{
	int cpu = smp_processor_id();
	unsigned int xirr;

	/* Handle an interrupt latched by KVM */
	xirr = kvmppc_get_xics_latch();
	if (xirr)
		return xirr;

	return in_be32(&icp_native_regs[cpu]->xirr.word);
}

static inline void icp_native_set_xirr(unsigned int value)
{
	int cpu = smp_processor_id();

	out_be32(&icp_native_regs[cpu]->xirr.word, value);
}

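/*
 * The CPPR is the most-significant byte of the (big-endian) XIRR,
 * so a one-byte store to xirr.bytes[0] changes the processor's
 * current priority without signalling an EOI.
 */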
static inline void icp_native_set_cppr(u8 value)
{
	int cpu = smp_processor_id();

	out_8(&icp_native_regs[cpu]->xirr.bytes[0], value);
}

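/*
 * Writing the MFRR ("qirr") raises an IPI on the target CPU at the
 * given priority; writing 0xff, the least favored priority, clears
 * any pending IPI instead.
 */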
static inline void icp_native_set_qirr(int n_cpu, u8 value)
{
	out_8(&icp_native_regs[n_cpu]->qirr.bytes[0], value);
}

static void icp_native_set_cpu_priority(unsigned char cppr)
{
	xics_set_base_cppr(cppr);
	icp_native_set_cppr(cppr);
	iosync();
}

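/*
 * EOI is a full-word store to the XIRR: the top byte restores the
 * CPPR that was in effect before this interrupt was accepted
 * (popped from the software CPPR stack) and the low 24 bits name
 * the source being EOI'd.
 */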
void icp_native_eoi(struct irq_data *d)
{
	unsigned int hw_irq = (unsigned int)irqd_to_hwirq(d);

	iosync();
	icp_native_set_xirr((xics_pop_cppr() << 24) | hw_irq);
}

static void icp_native_teardown_cpu(void)
{
	int cpu = smp_processor_id();

	/* Clear any pending IPI */
	icp_native_set_qirr(cpu, 0xff);
}

static void icp_native_flush_ipi(void)
{
	/* We took the IPI irq and never returned, so we need to EOI
	 * the IPI, but we want to leave our priority at 0.
	 *
	 * Should we check all the other interrupts too?
	 * Should we be flagging the idle loop instead?
	 * Or creating some task to be scheduled?
	 */

	icp_native_set_xirr((0x00 << 24) | XICS_IPI);
}

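/*
 * Fetch and translate the next pending interrupt.  The low 24 bits
 * of the XIRR are the hardware source number; a spurious vector
 * (XICS_IRQ_SPURIOUS) means nothing was pending.
 */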
static unsigned int icp_native_get_irq(void)
{
	unsigned int xirr = icp_native_get_xirr();
	unsigned int vec = xirr & 0x00ffffff;
	unsigned int irq;

	if (vec == XICS_IRQ_SPURIOUS)
		return NO_IRQ;

	irq = irq_find_mapping(xics_host, vec);
	if (likely(irq != NO_IRQ)) {
		xics_push_cppr(vec);
		return irq;
	}

	/* We don't have a Linux mapping, so have RTAS mask it. */
	xics_mask_unknown_vec(vec);

	/* We might learn about it later, so EOI it */
	icp_native_set_xirr(xirr);

	return NO_IRQ;
}

#ifdef CONFIG_SMP

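/*
 * Prefer a hardware doorbell when the target is a sibling thread on
 * the same core: it avoids the MMIO store to the remote MFRR.
 * Otherwise fall back to writing IPI_PRIORITY to the target's MFRR.
 */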
static void icp_native_cause_ipi(int cpu, unsigned long data)
{
	kvmppc_set_host_ipi(cpu, 1);
#ifdef CONFIG_PPC_DOORBELL
	if (cpu_has_feature(CPU_FTR_DBELL)) {
		if (cpumask_test_cpu(cpu, cpu_sibling_mask(get_cpu()))) {
			doorbell_cause_ipi(cpu, data);
			put_cpu();
			return;
		}
		put_cpu();
	}
#endif
	icp_native_set_qirr(cpu, IPI_PRIORITY);
}

/*
 * Called when an interrupt is received on an off-line CPU to
 * clear the interrupt, so that the CPU can go back to nap mode.
 */
void icp_native_flush_interrupt(void)
{
	unsigned int xirr = icp_native_get_xirr();
	unsigned int vec = xirr & 0x00ffffff;

	if (vec == XICS_IRQ_SPURIOUS)
		return;
	if (vec == XICS_IPI) {
		/* Clear pending IPI */
		int cpu = smp_processor_id();
		kvmppc_set_host_ipi(cpu, 0);
		icp_native_set_qirr(cpu, 0xff);
	} else {
		pr_err("XICS: hw interrupt 0x%x to offline cpu, disabling\n",
		       vec);
		xics_mask_unknown_vec(vec);
	}
	/* EOI the interrupt */
	icp_native_set_xirr(xirr);
}

void xics_wake_cpu(int cpu)
{
	icp_native_set_qirr(cpu, IPI_PRIORITY);
}
EXPORT_SYMBOL_GPL(xics_wake_cpu);

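/*
 * IPI handler: acknowledge the IPI by clearing the MFRR (0xff) and
 * the KVM host-IPI flag, then let smp_ipi_demux() dispatch whatever
 * messages were queued for this CPU.
 */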
static irqreturn_t icp_native_ipi_action(int irq, void *dev_id)
{
	int cpu = smp_processor_id();

	kvmppc_set_host_ipi(cpu, 0);
	icp_native_set_qirr(cpu, 0xff);

	return smp_ipi_demux();
}

#endif /* CONFIG_SMP */

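/*
 * Map one CPU's ICP registers: find the Linux CPU that owns the
 * given hardware interrupt server, reserve and ioremap its MMIO
 * window, and record the physical address for KVM's real-mode ICP
 * accesses.
 */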
static int __init icp_native_map_one_cpu(int hw_id, unsigned long addr,
					 unsigned long size)
{
	char *rname;
	int i, cpu = -1;

	/* This may look gross but it's good enough for now, we don't quite
	 * have a hardware -> Linux processor id mapping.
	 */
	for_each_possible_cpu(i) {
		if (!cpu_present(i))
			continue;
		if (hw_id == get_hard_smp_processor_id(i)) {
			cpu = i;
			break;
		}
	}

	/* No match, skip that CPU. Don't print an error, it's normal:
	 * some XICS come up with way more entries in there than you
	 * have CPUs.
	 */
	if (cpu == -1)
		return 0;

	rname = kasprintf(GFP_KERNEL, "CPU %d [0x%x] Interrupt Presentation",
			  cpu, hw_id);

	if (!request_mem_region(addr, size, rname)) {
		pr_warning("icp_native: Could not reserve ICP MMIO"
			   " for CPU %d, interrupt server #0x%x\n",
			   cpu, hw_id);
		return -EBUSY;
	}

	icp_native_regs[cpu] = ioremap(addr, size);
	kvmppc_set_xics_phys(cpu, addr);
	if (!icp_native_regs[cpu]) {
		pr_warning("icp_native: Failed ioremap for CPU %d, "
			   "interrupt server #0x%x, addr %#lx\n",
			   cpu, hw_id, addr);
		release_mem_region(addr, size);
		return -ENOMEM;
	}
	return 0;
}

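/*
 * Parse one XICS presentation node: "ibm,interrupt-server-ranges"
 * gives the first interrupt server number and the server count, and
 * each "reg" tuple is the MMIO window for one consecutive server.
 */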
static int __init icp_native_init_one_node(struct device_node *np,
					   unsigned int *indx)
{
	unsigned int ilen;
	const __be32 *ireg;
	int i;
	int reg_tuple_size;
	int num_servers = 0;

	/* This code makes the theoretically broken assumption that the
	 * interrupt server numbers are the same as the hard CPU numbers.
	 * This happens to be the case so far but we are playing with fire...
	 * should be fixed one of these days. -BenH.
	 */
	ireg = of_get_property(np, "ibm,interrupt-server-ranges", &ilen);

	/* Does that ever happen? We'll know soon enough... but even good
	 * old f80 does have that property.
	 */
	WARN_ON((ireg == NULL) || (ilen != 2*sizeof(u32)));

	if (ireg) {
		*indx = of_read_number(ireg, 1);
		if (ilen >= 2*sizeof(u32))
			num_servers = of_read_number(ireg + 1, 1);
	}

	ireg = of_get_property(np, "reg", &ilen);
	if (!ireg) {
		pr_err("icp_native: Can't find interrupt reg property\n");
		return -1;
	}

	reg_tuple_size = (of_n_addr_cells(np) + of_n_size_cells(np)) * 4;
	if (((ilen % reg_tuple_size) != 0)
	    || (num_servers && (num_servers != (ilen / reg_tuple_size)))) {
		pr_err("icp_native: ICP reg len (%d) != num servers (%d)\n",
		       ilen / reg_tuple_size, num_servers);
		return -1;
	}

	for (i = 0; i < (ilen / reg_tuple_size); i++) {
		struct resource r;
		int err;

		err = of_address_to_resource(np, i, &r);
		if (err) {
			pr_err("icp_native: Could not translate ICP MMIO"
			       " for interrupt server 0x%x (%d)\n", *indx, err);
			return -1;
		}

		if (icp_native_map_one_cpu(*indx, r.start, resource_size(&r)))
			return -1;

		(*indx)++;
	}
	return 0;
}

static const struct icp_ops icp_native_ops = {
	.get_irq	= icp_native_get_irq,
	.eoi		= icp_native_eoi,
	.set_priority	= icp_native_set_cpu_priority,
	.teardown_cpu	= icp_native_teardown_cpu,
	.flush_ipi	= icp_native_flush_ipi,
#ifdef CONFIG_SMP
	.ipi_action	= icp_native_ipi_action,
	.cause_ipi	= icp_native_cause_ipi,
#endif
};

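/*
 * Probe the "ibm,ppc-xicp" compatible nodes first, then fall back
 * to the legacy device_type used by older firmware.
 */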
int __init icp_native_init(void)
{
	struct device_node *np;
	u32 indx = 0;
	int found = 0;

	for_each_compatible_node(np, NULL, "ibm,ppc-xicp")
		if (icp_native_init_one_node(np, &indx) == 0)
			found = 1;
	if (!found) {
		for_each_node_by_type(np,
			"PowerPC-External-Interrupt-Presentation") {
			if (icp_native_init_one_node(np, &indx) == 0)
				found = 1;
		}
	}

	if (found == 0)
		return -ENODEV;

	icp_ops = &icp_native_ops;

	return 0;
}