Add the RT Linux 4.1.3-rt3 kernel as base
[kvmfornfv.git] / kernel / drivers / irqchip / exynos-combiner.c
/*
 * Copyright (c) 2010-2011 Samsung Electronics Co., Ltd.
 *		http://www.samsung.com
 *
 * Combiner irqchip for EXYNOS
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
#include <linux/err.h>
#include <linux/export.h>
#include <linux/init.h>
#include <linux/io.h>
#include <linux/slab.h>
#include <linux/irqdomain.h>
#include <linux/irqchip/chained_irq.h>
#include <linux/interrupt.h>
#include <linux/of_address.h>
#include <linux/of_irq.h>

#include "irqchip.h"

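/*
 * Register offsets within each combiner group's register bank. Four
 * combiner channels share one 32-bit enable/status word, so every
 * channel owns an 8-bit slice of these registers.
 */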
#define COMBINER_ENABLE_SET     0x0
#define COMBINER_ENABLE_CLEAR   0x4
#define COMBINER_INT_STATUS     0xC

#define IRQ_IN_COMBINER         8

static DEFINE_SPINLOCK(irq_controller_lock);

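/*
 * Per-combiner state: the register base of the channel's group, the
 * byte lane this channel occupies within the group's registers
 * (irq_mask), the hwirq number of the group's first input
 * (hwirq_offset) and the parent interrupt this combiner cascades from.
 */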
struct combiner_chip_data {
        unsigned int hwirq_offset;
        unsigned int irq_mask;
        void __iomem *base;
        unsigned int parent_irq;
};

static struct irq_domain *combiner_irq_domain;

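/* Look up the register base for the combiner an irq_data belongs to. */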
static inline void __iomem *combiner_base(struct irq_data *data)
{
        struct combiner_chip_data *combiner_data =
                irq_data_get_irq_chip_data(data);

        return combiner_data->base;
}

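/*
 * Mask/unmask a single combiner input by writing its bit to the
 * clear/set enable registers; the set/clear pair avoids a
 * read-modify-write cycle.
 */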
static void combiner_mask_irq(struct irq_data *data)
{
        u32 mask = 1 << (data->hwirq % 32);

        __raw_writel(mask, combiner_base(data) + COMBINER_ENABLE_CLEAR);
}

static void combiner_unmask_irq(struct irq_data *data)
{
        u32 mask = 1 << (data->hwirq % 32);

        __raw_writel(mask, combiner_base(data) + COMBINER_ENABLE_SET);
}

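/*
 * Chained handler for the parent interrupt: read the group status word,
 * keep only this combiner's byte and dispatch the lowest pending input
 * through the irq domain mapping.
 */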
static void combiner_handle_cascade_irq(unsigned int irq, struct irq_desc *desc)
{
        struct combiner_chip_data *chip_data = irq_get_handler_data(irq);
        struct irq_chip *chip = irq_get_chip(irq);
        unsigned int cascade_irq, combiner_irq;
        unsigned long status;

        chained_irq_enter(chip, desc);

        spin_lock(&irq_controller_lock);
        status = __raw_readl(chip_data->base + COMBINER_INT_STATUS);
        spin_unlock(&irq_controller_lock);
        status &= chip_data->irq_mask;

        if (status == 0)
                goto out;

        combiner_irq = chip_data->hwirq_offset + __ffs(status);
        cascade_irq = irq_find_mapping(combiner_irq_domain, combiner_irq);

        if (unlikely(!cascade_irq))
                handle_bad_irq(irq, desc);
        else
                generic_handle_irq(cascade_irq);

 out:
        chained_irq_exit(chip, desc);
}

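/*
 * The combiner has no affinity control of its own; delegate affinity
 * changes to the chip driving the parent interrupt.
 */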
#ifdef CONFIG_SMP
static int combiner_set_affinity(struct irq_data *d,
                                 const struct cpumask *mask_val, bool force)
{
        struct combiner_chip_data *chip_data = irq_data_get_irq_chip_data(d);
        struct irq_chip *chip = irq_get_chip(chip_data->parent_irq);
        struct irq_data *data = irq_get_irq_data(chip_data->parent_irq);

        if (chip && chip->irq_set_affinity)
                return chip->irq_set_affinity(data, mask_val, force);
        else
                return -EINVAL;
}
#endif

static struct irq_chip combiner_chip = {
        .name                   = "COMBINER",
        .irq_mask               = combiner_mask_irq,
        .irq_unmask             = combiner_unmask_irq,
#ifdef CONFIG_SMP
        .irq_set_affinity       = combiner_set_affinity,
#endif
};

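/* Hook a combiner up as the chained handler of its parent interrupt. */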
static void __init combiner_cascade_irq(struct combiner_chip_data *combiner_data,
                                        unsigned int irq)
{
        if (irq_set_handler_data(irq, combiner_data) != 0)
                BUG();
        irq_set_chained_handler(irq, combiner_handle_cascade_irq);
}

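/*
 * Initialise one combiner channel: combiners are grouped four to a
 * register bank, each owning one byte of the shared 32-bit registers,
 * and all of the channel's inputs start out disabled.
 */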
static void __init combiner_init_one(struct combiner_chip_data *combiner_data,
                                     unsigned int combiner_nr,
                                     void __iomem *base, unsigned int irq)
{
        combiner_data->base = base;
        combiner_data->hwirq_offset = (combiner_nr & ~3) * IRQ_IN_COMBINER;
        combiner_data->irq_mask = 0xff << ((combiner_nr % 4) << 3);
        combiner_data->parent_irq = irq;

        /* Disable all interrupts */
        __raw_writel(combiner_data->irq_mask, base + COMBINER_ENABLE_CLEAR);
}

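/*
 * Translate a two-cell devicetree specifier <combiner-nr, input-nr>
 * into a linear hwirq number.
 */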
static int combiner_irq_domain_xlate(struct irq_domain *d,
                                     struct device_node *controller,
                                     const u32 *intspec, unsigned int intsize,
                                     unsigned long *out_hwirq,
                                     unsigned int *out_type)
{
        if (d->of_node != controller)
                return -EINVAL;

        if (intsize < 2)
                return -EINVAL;

        *out_hwirq = intspec[0] * IRQ_IN_COMBINER + intspec[1];
        *out_type = 0;

        return 0;
}

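/*
 * Bind a newly mapped virq to the combiner chip, using the hwirq to
 * pick the right per-combiner chip data (eight inputs per combiner).
 */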
static int combiner_irq_domain_map(struct irq_domain *d, unsigned int irq,
                                   irq_hw_number_t hw)
{
        struct combiner_chip_data *combiner_data = d->host_data;

        irq_set_chip_and_handler(irq, &combiner_chip, handle_level_irq);
        irq_set_chip_data(irq, &combiner_data[hw >> 3]);
        set_irq_flags(irq, IRQF_VALID | IRQF_PROBE);

        return 0;
}

static struct irq_domain_ops combiner_irq_domain_ops = {
        .xlate  = combiner_irq_domain_xlate,
        .map    = combiner_irq_domain_map,
};

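/*
 * Allocate per-combiner data, register the linear irq domain and wire
 * each combiner to its parent interrupt from the devicetree.
 */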
static void __init combiner_init(void __iomem *combiner_base,
                                 struct device_node *np,
                                 unsigned int max_nr)
{
        int i, irq;
        unsigned int nr_irq;
        struct combiner_chip_data *combiner_data;

        nr_irq = max_nr * IRQ_IN_COMBINER;

        combiner_data = kcalloc(max_nr, sizeof (*combiner_data), GFP_KERNEL);
        if (!combiner_data) {
                pr_warning("%s: could not allocate combiner data\n", __func__);
                return;
        }

        combiner_irq_domain = irq_domain_add_linear(np, nr_irq,
                                &combiner_irq_domain_ops, combiner_data);
        if (WARN_ON(!combiner_irq_domain)) {
                pr_warning("%s: irq domain init failed\n", __func__);
                return;
        }

        for (i = 0; i < max_nr; i++) {
                irq = irq_of_parse_and_map(np, i);

                combiner_init_one(&combiner_data[i], i,
                                  combiner_base + (i >> 2) * 0x10, irq);
                combiner_cascade_irq(&combiner_data[i], irq);
        }
}

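/*
 * Devicetree entry point: map the combiner registers and read the
 * optional "samsung,combiner-nr" property (default of 20 combiners).
 */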
static int __init combiner_of_init(struct device_node *np,
                                   struct device_node *parent)
{
        void __iomem *combiner_base;
        unsigned int max_nr = 20;

        combiner_base = of_iomap(np, 0);
        if (!combiner_base) {
                pr_err("%s: failed to map combiner registers\n", __func__);
                return -ENXIO;
        }

        if (of_property_read_u32(np, "samsung,combiner-nr", &max_nr)) {
                pr_info("%s: number of combiners not specified, "
                        "setting default as %d.\n",
                        __func__, max_nr);
        }

        combiner_init(combiner_base, np, max_nr);

        return 0;
}
IRQCHIP_DECLARE(exynos4210_combiner, "samsung,exynos4210-combiner",
                combiner_of_init);