kvmfornfv.git: kernel/arch/arm/mach-bcm/platsmp-brcmstb.c
/*
 * Broadcom STB CPU SMP and hotplug support for ARM
 *
 * Copyright (C) 2013-2014 Broadcom Corporation
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License as
 * published by the Free Software Foundation version 2.
 *
 * This program is distributed "as is" WITHOUT ANY WARRANTY of any
 * kind, whether express or implied; without even the implied warranty
 * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 */

#include <linux/delay.h>
#include <linux/errno.h>
#include <linux/init.h>
#include <linux/io.h>
#include <linux/jiffies.h>
#include <linux/of_address.h>
#include <linux/of_platform.h>
#include <linux/printk.h>
#include <linux/regmap.h>
#include <linux/smp.h>
#include <linux/mfd/syscon.h>

#include <asm/cacheflush.h>
#include <asm/cp15.h>
#include <asm/mach-types.h>
#include <asm/smp_plat.h>

#include "brcmstb.h"

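/*
 * Bit fields of a CPU's BPCM power-zone control/status register, plus the
 * indices of the register offsets carried in the "syscon-cpu" DT property.
 */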
enum {
	ZONE_MAN_CLKEN_MASK		= BIT(0),
	ZONE_MAN_RESET_CNTL_MASK	= BIT(1),
	ZONE_MAN_MEM_PWR_MASK		= BIT(4),
	ZONE_RESERVED_1_MASK		= BIT(5),
	ZONE_MAN_ISO_CNTL_MASK		= BIT(6),
	ZONE_MANUAL_CONTROL_MASK	= BIT(7),
	ZONE_PWR_DN_REQ_MASK		= BIT(9),
	ZONE_PWR_UP_REQ_MASK		= BIT(10),
	ZONE_BLK_RST_ASSERT_MASK	= BIT(12),
	ZONE_PWR_OFF_STATE_MASK		= BIT(25),
	ZONE_PWR_ON_STATE_MASK		= BIT(26),
	ZONE_DPG_PWR_STATE_MASK		= BIT(28),
	ZONE_MEM_PWR_STATE_MASK		= BIT(29),
	ZONE_RESET_STATE_MASK		= BIT(31),
	CPU0_PWR_ZONE_CTRL_REG		= 1,
	CPU_RESET_CONFIG_REG		= 2,
};

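/* Register blocks and offsets set up from the device tree in brcmstb_cpu_ctrl_setup() */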
static void __iomem *cpubiuctrl_block;
static void __iomem *hif_cont_block;
static u32 cpu0_pwr_zone_ctrl_reg;
static u32 cpu_rst_cfg_reg;
static u32 hif_cont_reg;

#ifdef CONFIG_HOTPLUG_CPU
/*
 * We must quiesce a dying CPU before it can be killed by the boot CPU. Because
 * one or more caches may be disabled, we must flush to ensure coherency. We
 * cannot use traditional completion structures or spinlocks as they rely on
 * coherency.
 */
static DEFINE_PER_CPU_ALIGNED(int, per_cpu_sw_state);

static int per_cpu_sw_state_rd(u32 cpu)
{
	sync_cache_r(SHIFT_PERCPU_PTR(&per_cpu_sw_state, per_cpu_offset(cpu)));
	return per_cpu(per_cpu_sw_state, cpu);
}

static void per_cpu_sw_state_wr(u32 cpu, int val)
{
	dmb();
	per_cpu(per_cpu_sw_state, cpu) = val;
	sync_cache_w(SHIFT_PERCPU_PTR(&per_cpu_sw_state, per_cpu_offset(cpu)));
}
#else
static inline void per_cpu_sw_state_wr(u32 cpu, int val) { }
#endif

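/*
 * Helpers for the per-CPU power-zone control register inside the CPU BIU
 * control block.
 */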
static void __iomem *pwr_ctrl_get_base(u32 cpu)
{
	void __iomem *base = cpubiuctrl_block + cpu0_pwr_zone_ctrl_reg;
	base += (cpu_logical_map(cpu) * 4);
	return base;
}

static u32 pwr_ctrl_rd(u32 cpu)
{
	void __iomem *base = pwr_ctrl_get_base(cpu);
	return readl_relaxed(base);
}

static void pwr_ctrl_set(unsigned int cpu, u32 val, u32 mask)
{
	void __iomem *base = pwr_ctrl_get_base(cpu);
	writel((readl(base) & mask) | val, base);
}

static void pwr_ctrl_clr(unsigned int cpu, u32 val, u32 mask)
{
	void __iomem *base = pwr_ctrl_get_base(cpu);
	writel((readl(base) & mask) & ~val, base);
}

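/*
 * Poll the zone status bits selected by @mask until their set/clear state
 * matches @set, or give up after POLL_TMOUT_MS.
 */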
#define POLL_TMOUT_MS 500
static int pwr_ctrl_wait_tmout(unsigned int cpu, u32 set, u32 mask)
{
	const unsigned long timeo = jiffies + msecs_to_jiffies(POLL_TMOUT_MS);
	u32 tmp;

	do {
		tmp = pwr_ctrl_rd(cpu) & mask;
		if (!set == !tmp)
			return 0;
	} while (time_before(jiffies, timeo));

	tmp = pwr_ctrl_rd(cpu) & mask;
	if (!set == !tmp)
		return 0;

	return -ETIMEDOUT;
}

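/* Assert (set != 0) or de-assert reset for @cpu via the reset config register */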
static void cpu_rst_cfg_set(u32 cpu, int set)
{
	u32 val;
	val = readl_relaxed(cpubiuctrl_block + cpu_rst_cfg_reg);
	if (set)
		val |= BIT(cpu_logical_map(cpu));
	else
		val &= ~BIT(cpu_logical_map(cpu));
	writel_relaxed(val, cpubiuctrl_block + cpu_rst_cfg_reg);
}

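/*
 * Program the boot address for @cpu in the HIF continuation block. Each CPU
 * has an 8-byte entry; the first word is cleared before the entry point is
 * written to the second.
 */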
static void cpu_set_boot_addr(u32 cpu, unsigned long boot_addr)
{
	const int reg_ofs = cpu_logical_map(cpu) * 8;
	writel_relaxed(0, hif_cont_block + hif_cont_reg + reg_ofs);
	writel_relaxed(boot_addr, hif_cont_block + hif_cont_reg + 4 + reg_ofs);
}

static void brcmstb_cpu_boot(u32 cpu)
{
	/* Mark this CPU as "up" */
	per_cpu_sw_state_wr(cpu, 1);

	/*
	 * Set the reset vector to point to the secondary_startup
	 * routine
	 */
	cpu_set_boot_addr(cpu, virt_to_phys(brcmstb_secondary_startup));

	/* Unhalt the cpu */
	cpu_rst_cfg_set(cpu, 0);
}

static void brcmstb_cpu_power_on(u32 cpu)
{
	/*
	 * The secondary core's power was cut, so we must go through
	 * power-on initialization.
	 */
	pwr_ctrl_set(cpu, ZONE_MAN_ISO_CNTL_MASK, 0xffffff00);
	pwr_ctrl_set(cpu, ZONE_MANUAL_CONTROL_MASK, -1);
	pwr_ctrl_set(cpu, ZONE_RESERVED_1_MASK, -1);

	pwr_ctrl_set(cpu, ZONE_MAN_MEM_PWR_MASK, -1);

	if (pwr_ctrl_wait_tmout(cpu, 1, ZONE_MEM_PWR_STATE_MASK))
		panic("ZONE_MEM_PWR_STATE_MASK set timeout");

	pwr_ctrl_set(cpu, ZONE_MAN_CLKEN_MASK, -1);

	if (pwr_ctrl_wait_tmout(cpu, 1, ZONE_DPG_PWR_STATE_MASK))
		panic("ZONE_DPG_PWR_STATE_MASK set timeout");

	pwr_ctrl_clr(cpu, ZONE_MAN_ISO_CNTL_MASK, -1);
	pwr_ctrl_set(cpu, ZONE_MAN_RESET_CNTL_MASK, -1);
}

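/* Returns 0 if the CPU's power zone is still held in reset, 1 otherwise */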
static int brcmstb_cpu_get_power_state(u32 cpu)
{
	int tmp = pwr_ctrl_rd(cpu);
	return (tmp & ZONE_RESET_STATE_MASK) ? 0 : 1;
}

#ifdef CONFIG_HOTPLUG_CPU

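/*
 * Runs on the dying CPU itself: exit coherency, clear the "up" flag and
 * wait in WFI for the boot CPU to cut power.
 */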
static void brcmstb_cpu_die(u32 cpu)
{
	v7_exit_coherency_flush(all);

	per_cpu_sw_state_wr(cpu, 0);

	/* Sit and wait to die */
	wfi();

	/* We should never get here... */
	while (1)
		;
}

static int brcmstb_cpu_kill(u32 cpu)
{
	/*
	 * Ordinarily, the hardware forbids power-down of CPU0 (which is good
	 * because it is the boot CPU), but this is not true when using BPCM
	 * manual mode.  Consequently, we must avoid turning off CPU0 here to
	 * ensure that TI2C master reset will work.
	 */
	if (cpu == 0) {
		pr_warn("SMP: refusing to power off CPU0\n");
		return 1;
	}

	while (per_cpu_sw_state_rd(cpu))
		;

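	/* Power the zone down under BPCM manual control */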
	pwr_ctrl_set(cpu, ZONE_MANUAL_CONTROL_MASK, -1);
	pwr_ctrl_clr(cpu, ZONE_MAN_RESET_CNTL_MASK, -1);
	pwr_ctrl_clr(cpu, ZONE_MAN_CLKEN_MASK, -1);
	pwr_ctrl_set(cpu, ZONE_MAN_ISO_CNTL_MASK, -1);
	pwr_ctrl_clr(cpu, ZONE_MAN_MEM_PWR_MASK, -1);

	if (pwr_ctrl_wait_tmout(cpu, 0, ZONE_MEM_PWR_STATE_MASK))
		panic("ZONE_MEM_PWR_STATE_MASK clear timeout");

	pwr_ctrl_clr(cpu, ZONE_RESERVED_1_MASK, -1);

	if (pwr_ctrl_wait_tmout(cpu, 0, ZONE_DPG_PWR_STATE_MASK))
		panic("ZONE_DPG_PWR_STATE_MASK clear timeout");

	/* Flush pipeline before resetting CPU */
	mb();

	/* Assert reset on the CPU */
	cpu_rst_cfg_set(cpu, 1);

	return 1;
}

#endif /* CONFIG_HOTPLUG_CPU */

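/*
 * Parse the "syscon-cpu" phandle from the smpboot node, map the CPU BIU
 * control block and read the power-zone and reset-config register offsets.
 */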
static int __init setup_hifcpubiuctrl_regs(struct device_node *np)
{
	int rc = 0;
	char *name;
	struct device_node *syscon_np = NULL;

	name = "syscon-cpu";

	syscon_np = of_parse_phandle(np, name, 0);
	if (!syscon_np) {
		pr_err("can't find phandle %s\n", name);
		rc = -EINVAL;
		goto cleanup;
	}

	cpubiuctrl_block = of_iomap(syscon_np, 0);
	if (!cpubiuctrl_block) {
		pr_err("iomap failed for cpubiuctrl_block\n");
		rc = -EINVAL;
		goto cleanup;
	}

	rc = of_property_read_u32_index(np, name, CPU0_PWR_ZONE_CTRL_REG,
					&cpu0_pwr_zone_ctrl_reg);
	if (rc) {
		pr_err("failed to read 1st entry from %s property (%d)\n", name,
			rc);
		rc = -EINVAL;
		goto cleanup;
	}

	rc = of_property_read_u32_index(np, name, CPU_RESET_CONFIG_REG,
					&cpu_rst_cfg_reg);
	if (rc) {
		pr_err("failed to read 2nd entry from %s property (%d)\n", name,
			rc);
		rc = -EINVAL;
		goto cleanup;
	}

cleanup:
	of_node_put(syscon_np);
	return rc;
}

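/*
 * Parse the "syscon-cont" phandle and map the HIF continuation block that
 * holds the secondary boot address registers.
 */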
static int __init setup_hifcont_regs(struct device_node *np)
{
	int rc = 0;
	char *name;
	struct device_node *syscon_np = NULL;

	name = "syscon-cont";

	syscon_np = of_parse_phandle(np, name, 0);
	if (!syscon_np) {
		pr_err("can't find phandle %s\n", name);
		rc = -EINVAL;
		goto cleanup;
	}

	hif_cont_block = of_iomap(syscon_np, 0);
	if (!hif_cont_block) {
		pr_err("iomap failed for hif_cont_block\n");
		rc = -EINVAL;
		goto cleanup;
	}

	/* Offset is at top of hif_cont_block */
	hif_cont_reg = 0;

cleanup:
	of_node_put(syscon_np);
	return rc;
}

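/*
 * Locate the "brcm,brcmstb-smpboot" node and map the register blocks needed
 * to boot and power-manage the secondary CPUs.
 */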
static void __init brcmstb_cpu_ctrl_setup(unsigned int max_cpus)
{
	int rc;
	struct device_node *np;
	char *name;

	name = "brcm,brcmstb-smpboot";
	np = of_find_compatible_node(NULL, NULL, name);
	if (!np) {
		pr_err("can't find compatible node %s\n", name);
		return;
	}

	rc = setup_hifcpubiuctrl_regs(np);
	if (rc)
		return;

	rc = setup_hifcont_regs(np);
	if (rc)
		return;
}

static int brcmstb_boot_secondary(unsigned int cpu, struct task_struct *idle)
{
	/* Missing the brcm,brcmstb-smpboot DT node? */
	if (!cpubiuctrl_block || !hif_cont_block)
		return -ENODEV;

	/* Bring up power to the core if necessary */
	if (brcmstb_cpu_get_power_state(cpu) == 0)
		brcmstb_cpu_power_on(cpu);

	brcmstb_cpu_boot(cpu);

	return 0;
}

static struct smp_operations brcmstb_smp_ops __initdata = {
	.smp_prepare_cpus	= brcmstb_cpu_ctrl_setup,
	.smp_boot_secondary	= brcmstb_boot_secondary,
#ifdef CONFIG_HOTPLUG_CPU
	.cpu_kill		= brcmstb_cpu_kill,
	.cpu_die		= brcmstb_cpu_die,
#endif
};

CPU_METHOD_OF_DECLARE(brcmstb_smp, "brcm,brahma-b15", &brcmstb_smp_ops);