kernel/arch/metag/kernel/process.c (kvmfornfv.git; imported with the rt-linux 4.1.3-rt3 base)
/*
 * Copyright (C) 2005,2006,2007,2008,2009,2010,2011 Imagination Technologies
 *
 * This file contains the architecture-dependent parts of process handling.
 *
 */

#include <linux/errno.h>
#include <linux/export.h>
#include <linux/sched.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/unistd.h>
#include <linux/ptrace.h>
#include <linux/user.h>
#include <linux/reboot.h>
#include <linux/elfcore.h>
#include <linux/fs.h>
#include <linux/tick.h>
#include <linux/slab.h>
#include <linux/mman.h>
#include <linux/pm.h>
#include <linux/syscalls.h>
#include <linux/uaccess.h>
#include <linux/smp.h>
#include <asm/core_reg.h>
#include <asm/user_gateway.h>
#include <asm/tcm.h>
#include <asm/traps.h>
#include <asm/switch_to.h>

/*
 * Wait for the next interrupt and enable local interrupts
 */
void arch_cpu_idle(void)
{
        int tmp;

        /*
         * Quickly jump straight into the interrupt entry point without actually
         * triggering an interrupt. When TXSTATI gets read the processor will
         * block until an interrupt is triggered.
         */
        asm volatile (/* Switch into ISTAT mode */
                      "RTH\n\t"
                      /* Enable local interrupts */
                      "MOV      TXMASKI, %1\n\t"
                      /*
                       * We can't directly "SWAP PC, PCX", so we swap via a
                       * temporary. Essentially we do:
                       *  PCX_new = 1f (the place to continue execution)
                       *  PC = PCX_old
                       */
                      "ADD      %0, CPC0, #(1f-.)\n\t"
                      "SWAP     PCX, %0\n\t"
                      "MOV      PC, %0\n"
                      /* Continue execution here with interrupts enabled */
                      "1:"
                      : "=a" (tmp)
                      : "r" (get_trigger_mask()));
}

#ifdef CONFIG_HOTPLUG_CPU
void arch_cpu_idle_dead(void)
{
        cpu_die();
}
#endif

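/*
 * Power-off and reset hooks. pm_power_off is the generic kernel hook;
 * soc_restart and soc_halt are left NULL unless platform code installs
 * SoC-specific restart/halt handlers, so the machine_* functions below
 * fall back to hard_processor_halt() when no hook is present.
 */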
void (*pm_power_off)(void);
EXPORT_SYMBOL(pm_power_off);

void (*soc_restart)(char *cmd);
void (*soc_halt)(void);

void machine_restart(char *cmd)
{
        if (soc_restart)
                soc_restart(cmd);
        hard_processor_halt(HALT_OK);
}

void machine_halt(void)
{
        if (soc_halt)
                soc_halt();
        smp_send_stop();
        hard_processor_halt(HALT_OK);
}

void machine_power_off(void)
{
        if (pm_power_off)
                pm_power_off();
        smp_send_stop();
        hard_processor_halt(HALT_OK);
}

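/* Condition flag bits in the saved TBICTX Flags field, decoded by show_regs(). */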
#define FLAG_Z 0x8
#define FLAG_N 0x4
#define FLAG_O 0x2
#define FLAG_C 0x1

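/*
 * Print a saved register context. Extended (DSP/FPU) state is not fully
 * decoded here; a warning is printed when it is present, since AX2 in the
 * base context is then stale.
 */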
void show_regs(struct pt_regs *regs)
{
        int i;
        const char *AX0_names[] = {"A0StP", "A0FrP"};
        const char *AX1_names[] = {"A1GbP", "A1LbP"};

        const char *DX0_names[] = {
                "D0Re0",
                "D0Ar6",
                "D0Ar4",
                "D0Ar2",
                "D0FrT",
                "D0.5 ",
                "D0.6 ",
                "D0.7 "
        };

        const char *DX1_names[] = {
                "D1Re0",
                "D1Ar5",
                "D1Ar3",
                "D1Ar1",
                "D1RtP",
                "D1.5 ",
                "D1.6 ",
                "D1.7 "
        };

        show_regs_print_info(KERN_INFO);

        pr_info(" pt_regs @ %p\n", regs);
        pr_info(" SaveMask = 0x%04hx\n", regs->ctx.SaveMask);
        pr_info(" Flags = 0x%04hx (%c%c%c%c)\n", regs->ctx.Flags,
                regs->ctx.Flags & FLAG_Z ? 'Z' : 'z',
                regs->ctx.Flags & FLAG_N ? 'N' : 'n',
                regs->ctx.Flags & FLAG_O ? 'O' : 'o',
                regs->ctx.Flags & FLAG_C ? 'C' : 'c');
        pr_info(" TXRPT = 0x%08x\n", regs->ctx.CurrRPT);
        pr_info(" PC = 0x%08x\n", regs->ctx.CurrPC);

        /* AX regs */
        for (i = 0; i < 2; i++) {
                pr_info(" %s = 0x%08x    ",
                        AX0_names[i],
                        regs->ctx.AX[i].U0);
                printk(" %s = 0x%08x\n",
                        AX1_names[i],
                        regs->ctx.AX[i].U1);
        }

        if (regs->ctx.SaveMask & TBICTX_XEXT_BIT)
                pr_warn(" Extended state present - AX2.[01] will be WRONG\n");

        /* Special place with AXx.2 */
        pr_info(" A0.2  = 0x%08x    ",
                regs->ctx.Ext.AX2.U0);
        printk(" A1.2  = 0x%08x\n",
                regs->ctx.Ext.AX2.U1);

        /* 'extended' AX regs (nominally, just AXx.3) */
        for (i = 0; i < (TBICTX_AX_REGS - 3); i++) {
                pr_info(" A0.%d  = 0x%08x    ", i + 3, regs->ctx.AX3[i].U0);
                printk(" A1.%d  = 0x%08x\n", i + 3, regs->ctx.AX3[i].U1);
        }

        for (i = 0; i < 8; i++) {
                pr_info(" %s = 0x%08x    ", DX0_names[i], regs->ctx.DX[i].U0);
                printk(" %s = 0x%08x\n", DX1_names[i], regs->ctx.DX[i].U1);
        }

        show_trace(NULL, (unsigned long *)regs->ctx.AX[0].U0, regs);
}

/*
 * Copy architecture-specific thread state
 */
int copy_thread(unsigned long clone_flags, unsigned long usp,
                unsigned long kthread_arg, struct task_struct *tsk)
{
        struct pt_regs *childregs = task_pt_regs(tsk);
        void *kernel_context = ((void *) childregs +
                                sizeof(struct pt_regs));
        unsigned long global_base;

        BUG_ON(((unsigned long)childregs) & 0x7);
        BUG_ON(((unsigned long)kernel_context) & 0x7);

        memset(&tsk->thread.kernel_context, 0,
                        sizeof(tsk->thread.kernel_context));

        tsk->thread.kernel_context = __TBISwitchInit(kernel_context,
                                                     ret_from_fork,
                                                     0, 0);

        if (unlikely(tsk->flags & PF_KTHREAD)) {
                /*
                 * Make sure we don't leak any kernel data to the child's regs
                 * in case the kernel thread becomes a userspace thread later.
                 */
                memset(childregs, 0, sizeof(struct pt_regs));

                global_base = __core_reg_get(A1GbP);
                childregs->ctx.AX[0].U1 = (unsigned long) global_base;
                childregs->ctx.AX[0].U0 = (unsigned long) kernel_context;
                /* Set D1Ar1=kthread_arg and D1RtP=usp (fn) */
                childregs->ctx.DX[4].U1 = usp;
                childregs->ctx.DX[3].U1 = kthread_arg;
                tsk->thread.int_depth = 2;
                return 0;
        }

        /*
         * Get a pointer to where the new child's register block should have
         * been pushed.
         * The Meta's stack grows upwards, and the context is the first
         * thing to be pushed by TBX (phew)
         */
        *childregs = *current_pt_regs();
        /* Set the correct stack for the clone mode */
        if (usp)
                childregs->ctx.AX[0].U0 = ALIGN(usp, 8);
        tsk->thread.int_depth = 1;

        /* Set the return value for the child process */
        childregs->ctx.DX[0].U0 = 0;

        /* The TLS pointer is passed as an argument to sys_clone. */
        if (clone_flags & CLONE_SETTLS)
                tsk->thread.tls_ptr =
                                (__force void __user *)childregs->ctx.DX[1].U1;

#ifdef CONFIG_METAG_FPU
        if (tsk->thread.fpu_context) {
                struct meta_fpu_context *ctx;

                ctx = kmemdup(tsk->thread.fpu_context,
                              sizeof(struct meta_fpu_context), GFP_ATOMIC);
                tsk->thread.fpu_context = ctx;
        }
#endif

#ifdef CONFIG_METAG_DSP
        if (tsk->thread.dsp_context) {
                struct meta_ext_context *ctx;
                int i;

                ctx = kmemdup(tsk->thread.dsp_context,
                              sizeof(struct meta_ext_context), GFP_ATOMIC);
                for (i = 0; i < 2; i++)
                        ctx->ram[i] = kmemdup(ctx->ram[i], ctx->ram_sz[i],
                                              GFP_ATOMIC);
                tsk->thread.dsp_context = ctx;
        }
#endif

        return 0;
}

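/*
 * FPU context is managed lazily: the context block is only allocated (with
 * GFP_ATOMIC, since this runs from the context-switch path) once a thread
 * has actually used the FPU, i.e. when TBICTX_FPAC_BIT is set in its user
 * flags.
 */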
#ifdef CONFIG_METAG_FPU
static void alloc_fpu_context(struct thread_struct *thread)
{
        thread->fpu_context = kzalloc(sizeof(struct meta_fpu_context),
                                      GFP_ATOMIC);
}

static void clear_fpu(struct thread_struct *thread)
{
        thread->user_flags &= ~TBICTX_FPAC_BIT;
        kfree(thread->fpu_context);
        thread->fpu_context = NULL;
}
#else
static void clear_fpu(struct thread_struct *thread)
{
        /* No FPU state to clear when CONFIG_METAG_FPU is disabled. */
}
#endif

#ifdef CONFIG_METAG_DSP
static void clear_dsp(struct thread_struct *thread)
{
        if (thread->dsp_context) {
                kfree(thread->dsp_context->ram[0]);
                kfree(thread->dsp_context->ram[1]);

                kfree(thread->dsp_context);

                thread->dsp_context = NULL;
        }

        __core_reg_set(D0.8, 0);
}
#else
static void clear_dsp(struct thread_struct *thread)
{
        /* No DSP state to clear when CONFIG_METAG_DSP is disabled. */
}
#endif

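/*
 * Switch from prev to next. The outgoing task's FPU state is saved here
 * (and flagged for restore the next time it is scheduled), then
 * __TBISwitch() performs the actual switch; the previous task is threaded
 * back through the TBI switch parameter so the caller can complete the
 * handover.
 */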
struct task_struct *__sched __switch_to(struct task_struct *prev,
                                        struct task_struct *next)
{
        TBIRES to, from;

        to.Switch.pCtx = next->thread.kernel_context;
        to.Switch.pPara = prev;

#ifdef CONFIG_METAG_FPU
        if (prev->thread.user_flags & TBICTX_FPAC_BIT) {
                struct pt_regs *regs = task_pt_regs(prev);
                TBIRES state;

                state.Sig.SaveMask = prev->thread.user_flags;
                state.Sig.pCtx = &regs->ctx;

                if (!prev->thread.fpu_context)
                        alloc_fpu_context(&prev->thread);
                if (prev->thread.fpu_context)
                        __TBICtxFPUSave(state, prev->thread.fpu_context);
        }
        /*
         * Force a restore of the FPU context next time this process is
         * scheduled.
         */
        if (prev->thread.fpu_context)
                prev->thread.fpu_context->needs_restore = true;
#endif

        from = __TBISwitch(to, &prev->thread.kernel_context);

        /* Restore TLS pointer for this process. */
        set_gateway_tls(current->thread.tls_ptr);

        return (struct task_struct *) from.Switch.pPara;
}

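/* Called on exec: the new program starts with clean FPU and DSP state. */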
void flush_thread(void)
{
        clear_fpu(&current->thread);
        clear_dsp(&current->thread);
}

/*
 * Free current thread data structures etc.
 */
void exit_thread(void)
{
        clear_fpu(&current->thread);
        clear_dsp(&current->thread);
}

/*
 * TODO: figure out how to unwind the kernel stack here to find out where
 * we went to sleep.
 */
unsigned long get_wchan(struct task_struct *p)
{
        return 0;
}

int dump_fpu(struct pt_regs *regs, elf_fpregset_t *fpu)
{
        /* Returning 0 indicates that the FPU state was not stored (as it was
         * not in use) */
        return 0;
}

#ifdef CONFIG_METAG_USER_TCM

#define ELF_MIN_ALIGN   PAGE_SIZE

#define ELF_PAGESTART(_v) ((_v) & ~(unsigned long)(ELF_MIN_ALIGN-1))
#define ELF_PAGEOFFSET(_v) ((_v) & (ELF_MIN_ALIGN-1))
#define ELF_PAGEALIGN(_v) (((_v) + ELF_MIN_ALIGN - 1) & ~(ELF_MIN_ALIGN - 1))

#define BAD_ADDR(x) ((unsigned long)(x) >= TASK_SIZE)

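/*
 * Map an ELF segment for a user process, mirroring the page rounding done
 * by the generic ELF loader. If the requested address carries a TCM tag,
 * the segment is additionally allocated in on-chip TCM (tightly coupled
 * memory) and its contents copied there.
 */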
unsigned long __metag_elf_map(struct file *filep, unsigned long addr,
                              struct elf_phdr *eppnt, int prot, int type,
                              unsigned long total_size)
{
        unsigned long map_addr, size;
        unsigned long page_off = ELF_PAGEOFFSET(eppnt->p_vaddr);
        unsigned long raw_size = eppnt->p_filesz + page_off;
        unsigned long off = eppnt->p_offset - page_off;
        unsigned int tcm_tag;

        addr = ELF_PAGESTART(addr);
        size = ELF_PAGEALIGN(raw_size);

        /*
         * mmap() will return -EINVAL if given a zero size, but a
         * segment with zero filesize is perfectly valid.
         */
        if (!size)
                return addr;

        tcm_tag = tcm_lookup_tag(addr);

        if (tcm_tag != TCM_INVALID_TAG)
                type &= ~MAP_FIXED;

        /*
         * total_size is the size of the ELF (interpreter) image.
         * The _first_ mmap needs to know the full size, otherwise
         * randomization might put this image into an overlapping
         * position with the ELF binary image (since size < total_size).
         * So we first map the 'big' image and then unmap the remainder
         * at the end (the unmap is needed for ELF images with holes).
         */
        if (total_size) {
                total_size = ELF_PAGEALIGN(total_size);
                map_addr = vm_mmap(filep, addr, total_size, prot, type, off);
                if (!BAD_ADDR(map_addr))
                        vm_munmap(map_addr+size, total_size-size);
        } else {
                map_addr = vm_mmap(filep, addr, size, prot, type, off);
        }

        if (!BAD_ADDR(map_addr) && tcm_tag != TCM_INVALID_TAG) {
                struct tcm_allocation *tcm;
                unsigned long tcm_addr;

                tcm = kmalloc(sizeof(*tcm), GFP_KERNEL);
                if (!tcm)
                        return -ENOMEM;

                tcm_addr = tcm_alloc(tcm_tag, raw_size);
                if (tcm_addr != addr) {
                        kfree(tcm);
                        return -ENOMEM;
                }

                tcm->tag = tcm_tag;
                tcm->addr = tcm_addr;
                tcm->size = raw_size;

                list_add(&tcm->list, &current->mm->context.tcm);

                eppnt->p_vaddr = map_addr;
                if (copy_from_user((void *) addr, (void __user *) map_addr,
                                   raw_size))
                        return -EFAULT;
        }

        return map_addr;
}
#endif
444 #endif