kernel/arch/mips/include/asm/uaccess.h (kvmfornfv.git, raw update to linux-4.4.6-rt14 kernel sources)
1 /*
2  * This file is subject to the terms and conditions of the GNU General Public
3  * License.  See the file "COPYING" in the main directory of this archive
4  * for more details.
5  *
6  * Copyright (C) 1996, 1997, 1998, 1999, 2000, 03, 04 by Ralf Baechle
7  * Copyright (C) 1999, 2000 Silicon Graphics, Inc.
8  * Copyright (C) 2007  Maciej W. Rozycki
9  * Copyright (C) 2014, Imagination Technologies Ltd.
10  */
11 #ifndef _ASM_UACCESS_H
12 #define _ASM_UACCESS_H
13
14 #include <linux/kernel.h>
15 #include <linux/errno.h>
16 #include <linux/thread_info.h>
17 #include <asm/asm-eva.h>
18
19 /*
20  * The fs value determines whether argument validity checking should be
21  * performed or not.  If get_fs() == USER_DS, checking is performed; with
22  * get_fs() == KERNEL_DS, checking is bypassed.
23  *
24  * For historical reasons, these macros are grossly misnamed.
25  */
26 #ifdef CONFIG_32BIT
27
28 #ifdef CONFIG_KVM_GUEST
29 #define __UA_LIMIT 0x40000000UL
30 #else
31 #define __UA_LIMIT 0x80000000UL
32 #endif
33
34 #define __UA_ADDR       ".word"
35 #define __UA_LA         "la"
36 #define __UA_ADDU       "addu"
37 #define __UA_t0         "$8"
38 #define __UA_t1         "$9"
39
40 #endif /* CONFIG_32BIT */
41
42 #ifdef CONFIG_64BIT
43
44 extern u64 __ua_limit;
45
46 #define __UA_LIMIT      __ua_limit
47
48 #define __UA_ADDR       ".dword"
49 #define __UA_LA         "dla"
50 #define __UA_ADDU       "daddu"
51 #define __UA_t0         "$12"
52 #define __UA_t1         "$13"
53
54 #endif /* CONFIG_64BIT */
55
56 /*
57  * USER_DS is a bitmask that has the bits set that may not be set in a valid
58  * userspace address.  Note that we limit 32-bit userspace to 0x7fff8000 but
59  * the arithmetic we're doing only works if the limit is a power of two, so
60  * we use 0x80000000 here on 32-bit kernels.  If a process passes an invalid
61  * address in this range it's the process's problem, not ours :-)
62  */
63
64 #ifdef CONFIG_KVM_GUEST
65 #define KERNEL_DS       ((mm_segment_t) { 0x80000000UL })
66 #define USER_DS         ((mm_segment_t) { 0xC0000000UL })
67 #else
68 #define KERNEL_DS       ((mm_segment_t) { 0UL })
69 #define USER_DS         ((mm_segment_t) { __UA_LIMIT })
70 #endif
71
72 #define VERIFY_READ    0
73 #define VERIFY_WRITE   1
74
75 #define get_ds()        (KERNEL_DS)
76 #define get_fs()        (current_thread_info()->addr_limit)
77 #define set_fs(x)       (current_thread_info()->addr_limit = (x))
78
79 #define segment_eq(a, b)        ((a).seg == (b).seg)
80
81 /*
82  * eva_kernel_access() - determine whether a memory access is a kernel access on an EVA system
83  *
84  * Determines whether memory accesses should be performed to kernel memory
85  * on a system using Extended Virtual Addressing (EVA).
86  *
87  * Return: true if this is a kernel memory access on an EVA system, else false.
88  */
89 static inline bool eva_kernel_access(void)
90 {
91         if (!config_enabled(CONFIG_EVA))
92                 return false;
93
94         return segment_eq(get_fs(), get_ds());
95 }
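/*
 * Example: a minimal sketch of the classic get_fs()/set_fs() pattern that the
 * helpers above support; kdata, val and err are hypothetical locals.
 *
 *	mm_segment_t old_fs = get_fs();
 *	int kdata = 42, val, err;
 *
 *	set_fs(KERNEL_DS);
 *	// With KERNEL_DS in force access_ok() accepts kernel addresses and,
 *	// under EVA, eva_kernel_access() returns true, so the get/put/copy
 *	// helpers below use normal kernel loads and stores rather than the
 *	// EVA user-mode instructions.
 *	err = get_user(val, (int __user *)&kdata);
 *	set_fs(old_fs);
 */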
96
97 /*
98  * Is an address valid? This does a straightforward calculation rather
99  * than tests.
100  *
101  * Address valid if:
102  *  - "addr" doesn't have any high-bits set
103  *  - AND "size" doesn't have any high-bits set
104  *  - AND "addr+size" doesn't have any high-bits set
105  *  - OR we are in kernel mode.
106  *
107  * __ua_size() is a trick to avoid runtime checking of positive constant
108  * sizes; for those we already know at compile time that the size is ok.
109  */
110 #define __ua_size(size)                                                 \
111         ((__builtin_constant_p(size) && (signed long) (size) > 0) ? 0 : (size))
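/*
 * Worked example of the check (32-bit, non-KVM, so the mask is 0x80000000):
 * for addr = 0x7fffff00 and size = 0x200, addr itself is below the limit but
 * addr + size = 0x80000100 has the high bit set, so
 * mask & (addr | (addr + size) | __ua_size(size)) is non-zero and the access
 * is rejected.  For a positive constant size, __ua_size() contributes 0 and
 * the test reduces to checking addr and addr + size.
 */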
112
113 /*
114  * access_ok: - Checks if a user space pointer is valid
115  * @type: Type of access: %VERIFY_READ or %VERIFY_WRITE.  Note that
116  *        %VERIFY_WRITE is a superset of %VERIFY_READ - if it is safe
117  *        to write to a block, it is always safe to read from it.
118  * @addr: User space pointer to start of block to check
119  * @size: Size of block to check
120  *
121  * Context: User context only. This function may sleep if pagefaults are
122  *          enabled.
123  *
124  * Checks if a pointer to a block of memory in user space is valid.
125  *
126  * Returns true (nonzero) if the memory block may be valid, false (zero)
127  * if it is definitely invalid.
128  *
129  * Note that, depending on architecture, this function probably just
130  * checks that the pointer is in the user space range - after calling
131  * this function, memory access functions may still return -EFAULT.
132  */
133
134 #define __access_mask get_fs().seg
135
136 #define __access_ok(addr, size, mask)                                   \
137 ({                                                                      \
138         unsigned long __addr = (unsigned long) (addr);                  \
139         unsigned long __size = size;                                    \
140         unsigned long __mask = mask;                                    \
141         unsigned long __ok;                                             \
142                                                                         \
143         __chk_user_ptr(addr);                                           \
144         __ok = (signed long)(__mask & (__addr | (__addr + __size) |     \
145                 __ua_size(__size)));                                    \
146         __ok == 0;                                                      \
147 })
148
149 #define access_ok(type, addr, size)                                     \
150         likely(__access_ok((addr), (size), __access_mask))
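/*
 * Example: a minimal sketch of the usual access_ok() + __put_user() pattern
 * in an ioctl-style handler; the function name and 'arg' parameter are
 * hypothetical.
 *
 *	static int example_get_value(unsigned int __user *arg)
 *	{
 *		unsigned int val = 42;
 *
 *		if (!access_ok(VERIFY_WRITE, arg, sizeof(*arg)))
 *			return -EFAULT;
 *		return __put_user(val, arg);
 *	}
 */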
151
152 /*
153  * put_user: - Write a simple value into user space.
154  * @x:   Value to copy to user space.
155  * @ptr: Destination address, in user space.
156  *
157  * Context: User context only. This function may sleep if pagefaults are
158  *          enabled.
159  *
160  * This macro copies a single simple value from kernel space to user
161  * space.  It supports simple types like char and int, but not larger
162  * data types like structures or arrays.
163  *
164  * @ptr must have pointer-to-simple-variable type, and @x must be assignable
165  * to the result of dereferencing @ptr.
166  *
167  * Returns zero on success, or -EFAULT on error.
168  */
169 #define put_user(x,ptr) \
170         __put_user_check((x), (ptr), sizeof(*(ptr)))
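/*
 * Example: a minimal put_user() sketch; example_store_status() and statusp
 * are hypothetical.
 *
 *	static int example_store_status(int __user *statusp)
 *	{
 *		int status = 0;
 *
 *		return put_user(status, statusp);	// 0 on success, -EFAULT on fault
 *	}
 */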
171
172 /*
173  * get_user: - Get a simple variable from user space.
174  * @x:   Variable to store result.
175  * @ptr: Source address, in user space.
176  *
177  * Context: User context only. This function may sleep if pagefaults are
178  *          enabled.
179  *
180  * This macro copies a single simple variable from user space to kernel
181  * space.  It supports simple types like char and int, but not larger
182  * data types like structures or arrays.
183  *
184  * @ptr must have pointer-to-simple-variable type, and the result of
185  * dereferencing @ptr must be assignable to @x without a cast.
186  *
187  * Returns zero on success, or -EFAULT on error.
188  * On error, the variable @x is set to zero.
189  */
190 #define get_user(x,ptr) \
191         __get_user_check((x), (ptr), sizeof(*(ptr)))
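/*
 * Example: a minimal get_user() sketch; example_read_arg() and argp are
 * hypothetical.
 *
 *	static int example_read_arg(const int __user *argp, int *out)
 *	{
 *		int arg;
 *
 *		if (get_user(arg, argp))
 *			return -EFAULT;		// arg has been zeroed
 *		*out = arg;
 *		return 0;
 *	}
 */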
192
193 /*
194  * __put_user: - Write a simple value into user space, with less checking.
195  * @x:   Value to copy to user space.
196  * @ptr: Destination address, in user space.
197  *
198  * Context: User context only. This function may sleep if pagefaults are
199  *          enabled.
200  *
201  * This macro copies a single simple value from kernel space to user
202  * space.  It supports simple types like char and int, but not larger
203  * data types like structures or arrays.
204  *
205  * @ptr must have pointer-to-simple-variable type, and @x must be assignable
206  * to the result of dereferencing @ptr.
207  *
208  * Caller must check the pointer with access_ok() before calling this
209  * function.
210  *
211  * Returns zero on success, or -EFAULT on error.
212  */
213 #define __put_user(x,ptr) \
214         __put_user_nocheck((x), (ptr), sizeof(*(ptr)))
215
216 /*
217  * __get_user: - Get a simple variable from user space, with less checking.
218  * @x:   Variable to store result.
219  * @ptr: Source address, in user space.
220  *
221  * Context: User context only. This function may sleep if pagefaults are
222  *          enabled.
223  *
224  * This macro copies a single simple variable from user space to kernel
225  * space.  It supports simple types like char and int, but not larger
226  * data types like structures or arrays.
227  *
228  * @ptr must have pointer-to-simple-variable type, and the result of
229  * dereferencing @ptr must be assignable to @x without a cast.
230  *
231  * Caller must check the pointer with access_ok() before calling this
232  * function.
233  *
234  * Returns zero on success, or -EFAULT on error.
235  * On error, the variable @x is set to zero.
236  */
237 #define __get_user(x,ptr) \
238         __get_user_nocheck((x), (ptr), sizeof(*(ptr)))
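/*
 * Example: the __get_user()/__put_user() variants skip the access_ok() test,
 * so the caller performs it once up front, e.g. when reading a pair of
 * adjacent values; the names below are hypothetical.
 *
 *	static int example_read_pair(const int __user *uaddr, int *a, int *b)
 *	{
 *		if (!access_ok(VERIFY_READ, uaddr, 2 * sizeof(int)))
 *			return -EFAULT;
 *		if (__get_user(*a, uaddr) || __get_user(*b, uaddr + 1))
 *			return -EFAULT;
 *		return 0;
 *	}
 */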
239
240 struct __large_struct { unsigned long buf[100]; };
241 #define __m(x) (*(struct __large_struct __user *)(x))
242
243 /*
244  * Yuck.  We need two variants, one for 64bit operation and one
245  * for 32 bit mode and old iron.
246  */
247 #ifndef CONFIG_EVA
248 #define __get_kernel_common(val, size, ptr) __get_user_common(val, size, ptr)
249 #else
250 /*
251  * Kernel specific functions for EVA. We need to use normal load instructions
252  * to read data from the kernel when operating in EVA mode. We use these macros
253  * to avoid redefining __get_data_asm for EVA.
254  */
255 #undef _loadd
256 #undef _loadw
257 #undef _loadh
258 #undef _loadb
259 #ifdef CONFIG_32BIT
260 #define _loadd                  _loadw
261 #else
262 #define _loadd(reg, addr)       "ld " reg ", " addr
263 #endif
264 #define _loadw(reg, addr)       "lw " reg ", " addr
265 #define _loadh(reg, addr)       "lh " reg ", " addr
266 #define _loadb(reg, addr)       "lb " reg ", " addr
267
268 #define __get_kernel_common(val, size, ptr)                             \
269 do {                                                                    \
270         switch (size) {                                                 \
271         case 1: __get_data_asm(val, _loadb, ptr); break;                \
272         case 2: __get_data_asm(val, _loadh, ptr); break;                \
273         case 4: __get_data_asm(val, _loadw, ptr); break;                \
274         case 8: __GET_DW(val, _loadd, ptr); break;                      \
275         default: __get_user_unknown(); break;                           \
276         }                                                               \
277 } while (0)
278 #endif
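/*
 * Rough illustration of the string pasting above: in the size == 4 case,
 * __get_kernel_common() expands to __get_data_asm(val, _loadw, ptr), whose
 * first asm line is built from _loadw("%1", "%3"), i.e. the string
 *
 *	"lw %1, %3"
 *
 * so the same __get_data_asm body can be reused with either the EVA user
 * accessors (user_lw, ...) or the plain kernel loads defined here.
 */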
279
280 #ifdef CONFIG_32BIT
281 #define __GET_DW(val, insn, ptr) __get_data_asm_ll32(val, insn, ptr)
282 #endif
283 #ifdef CONFIG_64BIT
284 #define __GET_DW(val, insn, ptr) __get_data_asm(val, insn, ptr)
285 #endif
286
287 extern void __get_user_unknown(void);
288
289 #define __get_user_common(val, size, ptr)                               \
290 do {                                                                    \
291         switch (size) {                                                 \
292         case 1: __get_data_asm(val, user_lb, ptr); break;               \
293         case 2: __get_data_asm(val, user_lh, ptr); break;               \
294         case 4: __get_data_asm(val, user_lw, ptr); break;               \
295         case 8: __GET_DW(val, user_ld, ptr); break;                     \
296         default: __get_user_unknown(); break;                           \
297         }                                                               \
298 } while (0)
299
300 #define __get_user_nocheck(x, ptr, size)                                \
301 ({                                                                      \
302         int __gu_err;                                                   \
303                                                                         \
304         if (eva_kernel_access()) {                                      \
305                 __get_kernel_common((x), size, ptr);                    \
306         } else {                                                        \
307                 __chk_user_ptr(ptr);                                    \
308                 __get_user_common((x), size, ptr);                      \
309         }                                                               \
310         __gu_err;                                                       \
311 })
312
313 #define __get_user_check(x, ptr, size)                                  \
314 ({                                                                      \
315         int __gu_err = -EFAULT;                                         \
316         const __typeof__(*(ptr)) __user * __gu_ptr = (ptr);             \
317                                                                         \
318         might_fault();                                                  \
319         if (likely(access_ok(VERIFY_READ,  __gu_ptr, size))) {          \
320                 if (eva_kernel_access())                                \
321                         __get_kernel_common((x), size, __gu_ptr);       \
322                 else                                                    \
323                         __get_user_common((x), size, __gu_ptr);         \
324         } else                                                          \
325                 (x) = 0;                                                \
326                                                                         \
327         __gu_err;                                                       \
328 })
329
330 #define __get_data_asm(val, insn, addr)                                 \
331 {                                                                       \
332         long __gu_tmp;                                                  \
333                                                                         \
334         __asm__ __volatile__(                                           \
335         "1:     "insn("%1", "%3")"                              \n"     \
336         "2:                                                     \n"     \
337         "       .insn                                           \n"     \
338         "       .section .fixup,\"ax\"                          \n"     \
339         "3:     li      %0, %4                                  \n"     \
340         "       move    %1, $0                                  \n"     \
341         "       j       2b                                      \n"     \
342         "       .previous                                       \n"     \
343         "       .section __ex_table,\"a\"                       \n"     \
344         "       "__UA_ADDR "\t1b, 3b                            \n"     \
345         "       .previous                                       \n"     \
346         : "=r" (__gu_err), "=r" (__gu_tmp)                              \
347         : "0" (0), "o" (__m(addr)), "i" (-EFAULT));                     \
348                                                                         \
349         (val) = (__typeof__(*(addr))) __gu_tmp;                         \
350 }
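/*
 * Sketch of what the asm above arranges: the load at label 1 is listed in
 * __ex_table with a fixup at label 3.  If the load faults, the exception
 * handler finds the entry, jumps to the fixup, which sets the error code to
 * -EFAULT, zeroes the destination register and branches back to label 2, so
 * the macro hands -EFAULT back in __gu_err instead of oopsing.
 */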
351
352 /*
353  * Get a long long 64 using 32 bit registers.
354  */
355 #define __get_data_asm_ll32(val, insn, addr)                            \
356 {                                                                       \
357         union {                                                         \
358                 unsigned long long      l;                              \
359                 __typeof__(*(addr))     t;                              \
360         } __gu_tmp;                                                     \
361                                                                         \
362         __asm__ __volatile__(                                           \
363         "1:     " insn("%1", "(%3)")"                           \n"     \
364         "2:     " insn("%D1", "4(%3)")"                         \n"     \
365         "3:                                                     \n"     \
366         "       .insn                                           \n"     \
367         "       .section        .fixup,\"ax\"                   \n"     \
368         "4:     li      %0, %4                                  \n"     \
369         "       move    %1, $0                                  \n"     \
370         "       move    %D1, $0                                 \n"     \
371         "       j       3b                                      \n"     \
372         "       .previous                                       \n"     \
373         "       .section        __ex_table,\"a\"                \n"     \
374         "       " __UA_ADDR "   1b, 4b                          \n"     \
375         "       " __UA_ADDR "   2b, 4b                          \n"     \
376         "       .previous                                       \n"     \
377         : "=r" (__gu_err), "=&r" (__gu_tmp.l)                           \
378         : "0" (0), "r" (addr), "i" (-EFAULT));                          \
379                                                                         \
380         (val) = __gu_tmp.t;                                             \
381 }
382
383 #ifndef CONFIG_EVA
384 #define __put_kernel_common(ptr, size) __put_user_common(ptr, size)
385 #else
386 /*
387  * Kernel specific functions for EVA. We need to use normal store instructions
388  * to write data to the kernel when operating in EVA mode. We use these macros
389  * to avoid redefining __put_data_asm for EVA.
390  */
391 #undef _stored
392 #undef _storew
393 #undef _storeh
394 #undef _storeb
395 #ifdef CONFIG_32BIT
396 #define _stored                 _storew
397 #else
398 #define _stored(reg, addr)      "sd " reg ", " addr
399 #endif
400
401 #define _storew(reg, addr)      "sw " reg ", " addr
402 #define _storeh(reg, addr)      "sh " reg ", " addr
403 #define _storeb(reg, addr)      "sb " reg ", " addr
404
405 #define __put_kernel_common(ptr, size)                                  \
406 do {                                                                    \
407         switch (size) {                                                 \
408         case 1: __put_data_asm(_storeb, ptr); break;                    \
409         case 2: __put_data_asm(_storeh, ptr); break;                    \
410         case 4: __put_data_asm(_storew, ptr); break;                    \
411         case 8: __PUT_DW(_stored, ptr); break;                          \
412         default: __put_user_unknown(); break;                           \
413         }                                                               \
414 } while(0)
415 #endif
416
417 /*
418  * Yuck.  We need two variants, one for 64bit operation and one
419  * for 32 bit mode and old iron.
420  */
421 #ifdef CONFIG_32BIT
422 #define __PUT_DW(insn, ptr) __put_data_asm_ll32(insn, ptr)
423 #endif
424 #ifdef CONFIG_64BIT
425 #define __PUT_DW(insn, ptr) __put_data_asm(insn, ptr)
426 #endif
427
428 #define __put_user_common(ptr, size)                                    \
429 do {                                                                    \
430         switch (size) {                                                 \
431         case 1: __put_data_asm(user_sb, ptr); break;                    \
432         case 2: __put_data_asm(user_sh, ptr); break;                    \
433         case 4: __put_data_asm(user_sw, ptr); break;                    \
434         case 8: __PUT_DW(user_sd, ptr); break;                          \
435         default: __put_user_unknown(); break;                           \
436         }                                                               \
437 } while (0)
438
439 #define __put_user_nocheck(x, ptr, size)                                \
440 ({                                                                      \
441         __typeof__(*(ptr)) __pu_val;                                    \
442         int __pu_err = 0;                                               \
443                                                                         \
444         __pu_val = (x);                                                 \
445         if (eva_kernel_access()) {                                      \
446                 __put_kernel_common(ptr, size);                         \
447         } else {                                                        \
448                 __chk_user_ptr(ptr);                                    \
449                 __put_user_common(ptr, size);                           \
450         }                                                               \
451         __pu_err;                                                       \
452 })
453
454 #define __put_user_check(x, ptr, size)                                  \
455 ({                                                                      \
456         __typeof__(*(ptr)) __user *__pu_addr = (ptr);                   \
457         __typeof__(*(ptr)) __pu_val = (x);                              \
458         int __pu_err = -EFAULT;                                         \
459                                                                         \
460         might_fault();                                                  \
461         if (likely(access_ok(VERIFY_WRITE,  __pu_addr, size))) {        \
462                 if (eva_kernel_access())                                \
463                         __put_kernel_common(__pu_addr, size);           \
464                 else                                                    \
465                         __put_user_common(__pu_addr, size);             \
466         }                                                               \
467                                                                         \
468         __pu_err;                                                       \
469 })
470
471 #define __put_data_asm(insn, ptr)                                       \
472 {                                                                       \
473         __asm__ __volatile__(                                           \
474         "1:     "insn("%z2", "%3")"     # __put_data_asm        \n"     \
475         "2:                                                     \n"     \
476         "       .insn                                           \n"     \
477         "       .section        .fixup,\"ax\"                   \n"     \
478         "3:     li      %0, %4                                  \n"     \
479         "       j       2b                                      \n"     \
480         "       .previous                                       \n"     \
481         "       .section        __ex_table,\"a\"                \n"     \
482         "       " __UA_ADDR "   1b, 3b                          \n"     \
483         "       .previous                                       \n"     \
484         : "=r" (__pu_err)                                               \
485         : "0" (0), "Jr" (__pu_val), "o" (__m(ptr)),                     \
486           "i" (-EFAULT));                                               \
487 }
488
489 #define __put_data_asm_ll32(insn, ptr)                                  \
490 {                                                                       \
491         __asm__ __volatile__(                                           \
492         "1:     "insn("%2", "(%3)")"    # __put_data_asm_ll32   \n"     \
493         "2:     "insn("%D2", "4(%3)")"                          \n"     \
494         "3:                                                     \n"     \
495         "       .insn                                           \n"     \
496         "       .section        .fixup,\"ax\"                   \n"     \
497         "4:     li      %0, %4                                  \n"     \
498         "       j       3b                                      \n"     \
499         "       .previous                                       \n"     \
500         "       .section        __ex_table,\"a\"                \n"     \
501         "       " __UA_ADDR "   1b, 4b                          \n"     \
502         "       " __UA_ADDR "   2b, 4b                          \n"     \
503         "       .previous"                                              \
504         : "=r" (__pu_err)                                               \
505         : "0" (0), "r" (__pu_val), "r" (ptr),                           \
506           "i" (-EFAULT));                                               \
507 }
508
509 extern void __put_user_unknown(void);
510
511 /*
512  * ul{b,h,w} are macros and there are no equivalent macros for EVA.
513  * EVA unaligned access is handled in the ADE exception handler.
514  */
515 #ifndef CONFIG_EVA
516 /*
517  * put_user_unaligned: - Write a simple value into user space.
518  * @x:   Value to copy to user space.
519  * @ptr: Destination address, in user space.
520  *
521  * Context: User context only. This function may sleep if pagefaults are
522  *          enabled.
523  *
524  * This macro copies a single simple value from kernel space to user
525  * space.  It supports simple types like char and int, but not larger
526  * data types like structures or arrays.
527  *
528  * @ptr must have pointer-to-simple-variable type, and @x must be assignable
529  * to the result of dereferencing @ptr.
530  *
531  * Returns zero on success, or -EFAULT on error.
532  */
533 #define put_user_unaligned(x,ptr)       \
534         __put_user_unaligned_check((x),(ptr),sizeof(*(ptr)))
535
536 /*
537  * get_user_unaligned: - Get a simple variable from user space.
538  * @x:   Variable to store result.
539  * @ptr: Source address, in user space.
540  *
541  * Context: User context only. This function may sleep if pagefaults are
542  *          enabled.
543  *
544  * This macro copies a single simple variable from user space to kernel
545  * space.  It supports simple types like char and int, but not larger
546  * data types like structures or arrays.
547  *
548  * @ptr must have pointer-to-simple-variable type, and the result of
549  * dereferencing @ptr must be assignable to @x without a cast.
550  *
551  * Returns zero on success, or -EFAULT on error.
552  * On error, the variable @x is set to zero.
553  */
554 #define get_user_unaligned(x,ptr) \
555         __get_user_unaligned_check((x),(ptr),sizeof(*(ptr)))
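/*
 * Example: a minimal sketch for a field that is only 16-bit aligned inside a
 * packed user structure; struct example_pkt and its layout are hypothetical.
 *
 *	struct example_pkt {
 *		u16 tag;
 *		u32 value;	// offset 2: not naturally aligned
 *	} __packed;
 *
 *	static int example_read_value(struct example_pkt __user *p, u32 *out)
 *	{
 *		return get_user_unaligned(*out, &p->value);
 *	}
 */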
556
557 /*
558  * __put_user_unaligned: - Write a simple value into user space, with less checking.
559  * @x:   Value to copy to user space.
560  * @ptr: Destination address, in user space.
561  *
562  * Context: User context only. This function may sleep if pagefaults are
563  *          enabled.
564  *
565  * This macro copies a single simple value from kernel space to user
566  * space.  It supports simple types like char and int, but not larger
567  * data types like structures or arrays.
568  *
569  * @ptr must have pointer-to-simple-variable type, and @x must be assignable
570  * to the result of dereferencing @ptr.
571  *
572  * Caller must check the pointer with access_ok() before calling this
573  * function.
574  *
575  * Returns zero on success, or -EFAULT on error.
576  */
577 #define __put_user_unaligned(x,ptr) \
578         __put_user_unaligned_nocheck((x),(ptr),sizeof(*(ptr)))
579
580 /*
581  * __get_user_unaligned: - Get a simple variable from user space, with less checking.
582  * @x:   Variable to store result.
583  * @ptr: Source address, in user space.
584  *
585  * Context: User context only. This function may sleep if pagefaults are
586  *          enabled.
587  *
588  * This macro copies a single simple variable from user space to kernel
589  * space.  It supports simple types like char and int, but not larger
590  * data types like structures or arrays.
591  *
592  * @ptr must have pointer-to-simple-variable type, and the result of
593  * dereferencing @ptr must be assignable to @x without a cast.
594  *
595  * Caller must check the pointer with access_ok() before calling this
596  * function.
597  *
598  * Returns zero on success, or -EFAULT on error.
599  * On error, the variable @x is set to zero.
600  */
601 #define __get_user_unaligned(x,ptr) \
602         __get_user_unaligned_nocheck((x),(ptr),sizeof(*(ptr)))
603
604 /*
605  * Yuck.  We need two variants, one for 64bit operation and one
606  * for 32 bit mode and old iron.
607  */
608 #ifdef CONFIG_32BIT
609 #define __GET_USER_UNALIGNED_DW(val, ptr)                               \
610         __get_user_unaligned_asm_ll32(val, ptr)
611 #endif
612 #ifdef CONFIG_64BIT
613 #define __GET_USER_UNALIGNED_DW(val, ptr)                               \
614         __get_user_unaligned_asm(val, "uld", ptr)
615 #endif
616
617 extern void __get_user_unaligned_unknown(void);
618
619 #define __get_user_unaligned_common(val, size, ptr)                     \
620 do {                                                                    \
621         switch (size) {                                                 \
622         case 1: __get_data_asm(val, "lb", ptr); break;                  \
623         case 2: __get_data_unaligned_asm(val, "ulh", ptr); break;       \
624         case 4: __get_data_unaligned_asm(val, "ulw", ptr); break;       \
625         case 8: __GET_USER_UNALIGNED_DW(val, ptr); break;               \
626         default: __get_user_unaligned_unknown(); break;                 \
627         }                                                               \
628 } while (0)
629
630 #define __get_user_unaligned_nocheck(x,ptr,size)                        \
631 ({                                                                      \
632         int __gu_err;                                                   \
633                                                                         \
634         __get_user_unaligned_common((x), size, ptr);                    \
635         __gu_err;                                                       \
636 })
637
638 #define __get_user_unaligned_check(x,ptr,size)                          \
639 ({                                                                      \
640         int __gu_err = -EFAULT;                                         \
641         const __typeof__(*(ptr)) __user * __gu_ptr = (ptr);             \
642                                                                         \
643         if (likely(access_ok(VERIFY_READ,  __gu_ptr, size)))            \
644                 __get_user_unaligned_common((x), size, __gu_ptr);       \
645                                                                         \
646         __gu_err;                                                       \
647 })
648
649 #define __get_data_unaligned_asm(val, insn, addr)                       \
650 {                                                                       \
651         long __gu_tmp;                                                  \
652                                                                         \
653         __asm__ __volatile__(                                           \
654         "1:     " insn "        %1, %3                          \n"     \
655         "2:                                                     \n"     \
656         "       .insn                                           \n"     \
657         "       .section .fixup,\"ax\"                          \n"     \
658         "3:     li      %0, %4                                  \n"     \
659         "       move    %1, $0                                  \n"     \
660         "       j       2b                                      \n"     \
661         "       .previous                                       \n"     \
662         "       .section __ex_table,\"a\"                       \n"     \
663         "       "__UA_ADDR "\t1b, 3b                            \n"     \
664         "       "__UA_ADDR "\t1b + 4, 3b                        \n"     \
665         "       .previous                                       \n"     \
666         : "=r" (__gu_err), "=r" (__gu_tmp)                              \
667         : "0" (0), "o" (__m(addr)), "i" (-EFAULT));                     \
668                                                                         \
669         (val) = (__typeof__(*(addr))) __gu_tmp;                         \
670 }
671
672 /*
673  * Get a long long 64 using 32 bit registers.
674  */
675 #define __get_user_unaligned_asm_ll32(val, addr)                        \
676 {                                                                       \
677         unsigned long long __gu_tmp;                                    \
678                                                                         \
679         __asm__ __volatile__(                                           \
680         "1:     ulw     %1, (%3)                                \n"     \
681         "2:     ulw     %D1, 4(%3)                              \n"     \
682         "       move    %0, $0                                  \n"     \
683         "3:                                                     \n"     \
684         "       .insn                                           \n"     \
685         "       .section        .fixup,\"ax\"                   \n"     \
686         "4:     li      %0, %4                                  \n"     \
687         "       move    %1, $0                                  \n"     \
688         "       move    %D1, $0                                 \n"     \
689         "       j       3b                                      \n"     \
690         "       .previous                                       \n"     \
691         "       .section        __ex_table,\"a\"                \n"     \
692         "       " __UA_ADDR "   1b, 4b                          \n"     \
693         "       " __UA_ADDR "   1b + 4, 4b                      \n"     \
694         "       " __UA_ADDR "   2b, 4b                          \n"     \
695         "       " __UA_ADDR "   2b + 4, 4b                      \n"     \
696         "       .previous                                       \n"     \
697         : "=r" (__gu_err), "=&r" (__gu_tmp)                             \
698         : "0" (0), "r" (addr), "i" (-EFAULT));                          \
699         (val) = (__typeof__(*(addr))) __gu_tmp;                         \
700 }
701
702 /*
703  * Yuck.  We need two variants, one for 64bit operation and one
704  * for 32 bit mode and old iron.
705  */
706 #ifdef CONFIG_32BIT
707 #define __PUT_USER_UNALIGNED_DW(ptr) __put_user_unaligned_asm_ll32(ptr)
708 #endif
709 #ifdef CONFIG_64BIT
710 #define __PUT_USER_UNALIGNED_DW(ptr) __put_user_unaligned_asm("usd", ptr)
711 #endif
712
713 #define __put_user_unaligned_common(ptr, size)                          \
714 do {                                                                    \
715         switch (size) {                                                 \
716         case 1: __put_data_asm("sb", ptr); break;                       \
717         case 2: __put_user_unaligned_asm("ush", ptr); break;            \
718         case 4: __put_user_unaligned_asm("usw", ptr); break;            \
719         case 8: __PUT_USER_UNALIGNED_DW(ptr); break;                    \
720         default: __put_user_unaligned_unknown(); break;                 \
        }                                                               \
721 } while (0)
722
723 #define __put_user_unaligned_nocheck(x,ptr,size)                        \
724 ({                                                                      \
725         __typeof__(*(ptr)) __pu_val;                                    \
726         int __pu_err = 0;                                               \
727                                                                         \
728         __pu_val = (x);                                                 \
729         __put_user_unaligned_common(ptr, size);                         \
730         __pu_err;                                                       \
731 })
732
733 #define __put_user_unaligned_check(x,ptr,size)                          \
734 ({                                                                      \
735         __typeof__(*(ptr)) __user *__pu_addr = (ptr);                   \
736         __typeof__(*(ptr)) __pu_val = (x);                              \
737         int __pu_err = -EFAULT;                                         \
738                                                                         \
739         if (likely(access_ok(VERIFY_WRITE,  __pu_addr, size)))          \
740                 __put_user_unaligned_common(__pu_addr, size);           \
741                                                                         \
742         __pu_err;                                                       \
743 })
744
745 #define __put_user_unaligned_asm(insn, ptr)                             \
746 {                                                                       \
747         __asm__ __volatile__(                                           \
748         "1:     " insn "        %z2, %3         # __put_user_unaligned_asm\n" \
749         "2:                                                     \n"     \
750         "       .insn                                           \n"     \
751         "       .section        .fixup,\"ax\"                   \n"     \
752         "3:     li      %0, %4                                  \n"     \
753         "       j       2b                                      \n"     \
754         "       .previous                                       \n"     \
755         "       .section        __ex_table,\"a\"                \n"     \
756         "       " __UA_ADDR "   1b, 3b                          \n"     \
757         "       .previous                                       \n"     \
758         : "=r" (__pu_err)                                               \
759         : "0" (0), "Jr" (__pu_val), "o" (__m(ptr)),                     \
760           "i" (-EFAULT));                                               \
761 }
762
763 #define __put_user_unaligned_asm_ll32(ptr)                              \
764 {                                                                       \
765         __asm__ __volatile__(                                           \
766         "1:     sw      %2, (%3)        # __put_user_unaligned_asm_ll32 \n" \
767         "2:     sw      %D2, 4(%3)                              \n"     \
768         "3:                                                     \n"     \
769         "       .insn                                           \n"     \
770         "       .section        .fixup,\"ax\"                   \n"     \
771         "4:     li      %0, %4                                  \n"     \
772         "       j       3b                                      \n"     \
773         "       .previous                                       \n"     \
774         "       .section        __ex_table,\"a\"                \n"     \
775         "       " __UA_ADDR "   1b, 4b                          \n"     \
776         "       " __UA_ADDR "   1b + 4, 4b                      \n"     \
777         "       " __UA_ADDR "   2b, 4b                          \n"     \
778         "       " __UA_ADDR "   2b + 4, 4b                      \n"     \
779         "       .previous"                                              \
780         : "=r" (__pu_err)                                               \
781         : "0" (0), "r" (__pu_val), "r" (ptr),                           \
782           "i" (-EFAULT));                                               \
783 }
784
785 extern void __put_user_unaligned_unknown(void);
786 #endif
787
788 /*
789  * We're generating jumps to subroutines which will be outside the range of
790  * the jump instruction, so modules call through a register instead.
791  */
792 #ifdef MODULE
793 #define __MODULE_JAL(destination)                                       \
794         ".set\tnoat\n\t"                                                \
795         __UA_LA "\t$1, " #destination "\n\t"                            \
796         "jalr\t$1\n\t"                                                  \
797         ".set\tat\n\t"
798 #else
799 #define __MODULE_JAL(destination)                                       \
800         "jal\t" #destination "\n\t"
801 #endif
802
803 #if defined(CONFIG_CPU_DADDI_WORKAROUNDS) || (defined(CONFIG_EVA) &&    \
804                                               defined(CONFIG_CPU_HAS_PREFETCH))
805 #define DADDI_SCRATCH "$3"
806 #else
807 #define DADDI_SCRATCH "$0"
808 #endif
809
810 extern size_t __copy_user(void *__to, const void *__from, size_t __n);
811
812 #ifndef CONFIG_EVA
813 #define __invoke_copy_to_user(to, from, n)                              \
814 ({                                                                      \
815         register void __user *__cu_to_r __asm__("$4");                  \
816         register const void *__cu_from_r __asm__("$5");                 \
817         register long __cu_len_r __asm__("$6");                         \
818                                                                         \
819         __cu_to_r = (to);                                               \
820         __cu_from_r = (from);                                           \
821         __cu_len_r = (n);                                               \
822         __asm__ __volatile__(                                           \
823         __MODULE_JAL(__copy_user)                                       \
824         : "+r" (__cu_to_r), "+r" (__cu_from_r), "+r" (__cu_len_r)       \
825         :                                                               \
826         : "$8", "$9", "$10", "$11", "$12", "$14", "$15", "$24", "$31",  \
827           DADDI_SCRATCH, "memory");                                     \
828         __cu_len_r;                                                     \
829 })
830
831 #define __invoke_copy_to_kernel(to, from, n)                            \
832         __invoke_copy_to_user(to, from, n)
833
834 #endif
835
836 /*
837  * __copy_to_user: - Copy a block of data into user space, with less checking.
838  * @to:   Destination address, in user space.
839  * @from: Source address, in kernel space.
840  * @n:    Number of bytes to copy.
841  *
842  * Context: User context only. This function may sleep if pagefaults are
843  *          enabled.
844  *
845  * Copy data from kernel space to user space.  Caller must check
846  * the specified block with access_ok() before calling this function.
847  *
848  * Returns number of bytes that could not be copied.
849  * On success, this will be zero.
850  */
851 #define __copy_to_user(to, from, n)                                     \
852 ({                                                                      \
853         void __user *__cu_to;                                           \
854         const void *__cu_from;                                          \
855         long __cu_len;                                                  \
856                                                                         \
857         __cu_to = (to);                                                 \
858         __cu_from = (from);                                             \
859         __cu_len = (n);                                                 \
860         might_fault();                                                  \
861         if (eva_kernel_access())                                        \
862                 __cu_len = __invoke_copy_to_kernel(__cu_to, __cu_from,  \
863                                                    __cu_len);           \
864         else                                                            \
865                 __cu_len = __invoke_copy_to_user(__cu_to, __cu_from,    \
866                                                  __cu_len);             \
867         __cu_len;                                                       \
868 })
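/*
 * Example: a minimal __copy_to_user() sketch with the access_ok() check done
 * by the caller; example_dump() and its parameters are hypothetical.
 *
 *	static long example_dump(void __user *ubuf, const void *kbuf, size_t len)
 *	{
 *		if (!access_ok(VERIFY_WRITE, ubuf, len))
 *			return -EFAULT;
 *		if (__copy_to_user(ubuf, kbuf, len))
 *			return -EFAULT;		// some bytes were not copied
 *		return len;
 *	}
 */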
869
870 extern size_t __copy_user_inatomic(void *__to, const void *__from, size_t __n);
871
872 #define __copy_to_user_inatomic(to, from, n)                            \
873 ({                                                                      \
874         void __user *__cu_to;                                           \
875         const void *__cu_from;                                          \
876         long __cu_len;                                                  \
877                                                                         \
878         __cu_to = (to);                                                 \
879         __cu_from = (from);                                             \
880         __cu_len = (n);                                                 \
881         if (eva_kernel_access())                                        \
882                 __cu_len = __invoke_copy_to_kernel(__cu_to, __cu_from,  \
883                                                    __cu_len);           \
884         else                                                            \
885                 __cu_len = __invoke_copy_to_user(__cu_to, __cu_from,    \
886                                                  __cu_len);             \
887         __cu_len;                                                       \
888 })
889
890 #define __copy_from_user_inatomic(to, from, n)                          \
891 ({                                                                      \
892         void *__cu_to;                                                  \
893         const void __user *__cu_from;                                   \
894         long __cu_len;                                                  \
895                                                                         \
896         __cu_to = (to);                                                 \
897         __cu_from = (from);                                             \
898         __cu_len = (n);                                                 \
899         if (eva_kernel_access())                                        \
900                 __cu_len = __invoke_copy_from_kernel_inatomic(__cu_to,  \
901                                                               __cu_from,\
902                                                               __cu_len);\
903         else                                                            \
904                 __cu_len = __invoke_copy_from_user_inatomic(__cu_to,    \
905                                                             __cu_from,  \
906                                                             __cu_len);  \
907         __cu_len;                                                       \
908 })
909
910 /*
911  * copy_to_user: - Copy a block of data into user space.
912  * @to:   Destination address, in user space.
913  * @from: Source address, in kernel space.
914  * @n:    Number of bytes to copy.
915  *
916  * Context: User context only. This function may sleep if pagefaults are
917  *          enabled.
918  *
919  * Copy data from kernel space to user space.
920  *
921  * Returns number of bytes that could not be copied.
922  * On success, this will be zero.
923  */
924 #define copy_to_user(to, from, n)                                       \
925 ({                                                                      \
926         void __user *__cu_to;                                           \
927         const void *__cu_from;                                          \
928         long __cu_len;                                                  \
929                                                                         \
930         __cu_to = (to);                                                 \
931         __cu_from = (from);                                             \
932         __cu_len = (n);                                                 \
933         if (eva_kernel_access()) {                                      \
934                 __cu_len = __invoke_copy_to_kernel(__cu_to,             \
935                                                    __cu_from,           \
936                                                    __cu_len);           \
937         } else {                                                        \
938                 if (access_ok(VERIFY_WRITE, __cu_to, __cu_len)) {       \
939                         might_fault();                                  \
940                         __cu_len = __invoke_copy_to_user(__cu_to,       \
941                                                          __cu_from,     \
942                                                          __cu_len);     \
943                 }                                                       \
944         }                                                               \
945         __cu_len;                                                       \
946 })
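/*
 * Example: copy_to_user() performs the access_ok() check itself and returns
 * the number of bytes it could not copy; converting a non-zero count into
 * -EFAULT is the usual pattern.  example_read() and its parameters are
 * hypothetical.
 *
 *	static ssize_t example_read(char __user *ubuf, const char *kbuf,
 *				    size_t len)
 *	{
 *		if (copy_to_user(ubuf, kbuf, len))
 *			return -EFAULT;
 *		return len;
 *	}
 */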
947
948 #ifndef CONFIG_EVA
949
950 #define __invoke_copy_from_user(to, from, n)                            \
951 ({                                                                      \
952         register void *__cu_to_r __asm__("$4");                         \
953         register const void __user *__cu_from_r __asm__("$5");          \
954         register long __cu_len_r __asm__("$6");                         \
955                                                                         \
956         __cu_to_r = (to);                                               \
957         __cu_from_r = (from);                                           \
958         __cu_len_r = (n);                                               \
959         __asm__ __volatile__(                                           \
960         ".set\tnoreorder\n\t"                                           \
961         __MODULE_JAL(__copy_user)                                       \
962         ".set\tnoat\n\t"                                                \
963         __UA_ADDU "\t$1, %1, %2\n\t"                                    \
964         ".set\tat\n\t"                                                  \
965         ".set\treorder"                                                 \
966         : "+r" (__cu_to_r), "+r" (__cu_from_r), "+r" (__cu_len_r)       \
967         :                                                               \
968         : "$8", "$9", "$10", "$11", "$12", "$14", "$15", "$24", "$31",  \
969           DADDI_SCRATCH, "memory");                                     \
970         __cu_len_r;                                                     \
971 })
972
973 #define __invoke_copy_from_kernel(to, from, n)                          \
974         __invoke_copy_from_user(to, from, n)
975
976 /* For userland <-> userland operations */
977 #define ___invoke_copy_in_user(to, from, n)                             \
978         __invoke_copy_from_user(to, from, n)
979
980 /* For kernel <-> kernel operations */
981 #define ___invoke_copy_in_kernel(to, from, n)                           \
982         __invoke_copy_from_user(to, from, n)
983
984 #define __invoke_copy_from_user_inatomic(to, from, n)                   \
985 ({                                                                      \
986         register void *__cu_to_r __asm__("$4");                         \
987         register const void __user *__cu_from_r __asm__("$5");          \
988         register long __cu_len_r __asm__("$6");                         \
989                                                                         \
990         __cu_to_r = (to);                                               \
991         __cu_from_r = (from);                                           \
992         __cu_len_r = (n);                                               \
993         __asm__ __volatile__(                                           \
994         ".set\tnoreorder\n\t"                                           \
995         __MODULE_JAL(__copy_user_inatomic)                              \
996         ".set\tnoat\n\t"                                                \
997         __UA_ADDU "\t$1, %1, %2\n\t"                                    \
998         ".set\tat\n\t"                                                  \
999         ".set\treorder"                                                 \
1000         : "+r" (__cu_to_r), "+r" (__cu_from_r), "+r" (__cu_len_r)       \
1001         :                                                               \
1002         : "$8", "$9", "$10", "$11", "$12", "$14", "$15", "$24", "$31",  \
1003           DADDI_SCRATCH, "memory");                                     \
1004         __cu_len_r;                                                     \
1005 })
1006
1007 #define __invoke_copy_from_kernel_inatomic(to, from, n)                 \
1008         __invoke_copy_from_user_inatomic(to, from, n)
1009
1010 #else
1011
1012 /* EVA specific functions */
1013
1014 extern size_t __copy_user_inatomic_eva(void *__to, const void *__from,
1015                                        size_t __n);
1016 extern size_t __copy_from_user_eva(void *__to, const void *__from,
1017                                    size_t __n);
1018 extern size_t __copy_to_user_eva(void *__to, const void *__from,
1019                                  size_t __n);
1020 extern size_t __copy_in_user_eva(void *__to, const void *__from, size_t __n);
1021
1022 #define __invoke_copy_from_user_eva_generic(to, from, n, func_ptr)      \
1023 ({                                                                      \
1024         register void *__cu_to_r __asm__("$4");                         \
1025         register const void __user *__cu_from_r __asm__("$5");          \
1026         register long __cu_len_r __asm__("$6");                         \
1027                                                                         \
1028         __cu_to_r = (to);                                               \
1029         __cu_from_r = (from);                                           \
1030         __cu_len_r = (n);                                               \
1031         __asm__ __volatile__(                                           \
1032         ".set\tnoreorder\n\t"                                           \
1033         __MODULE_JAL(func_ptr)                                          \
1034         ".set\tnoat\n\t"                                                \
1035         __UA_ADDU "\t$1, %1, %2\n\t"                                    \
1036         ".set\tat\n\t"                                                  \
1037         ".set\treorder"                                                 \
1038         : "+r" (__cu_to_r), "+r" (__cu_from_r), "+r" (__cu_len_r)       \
1039         :                                                               \
1040         : "$8", "$9", "$10", "$11", "$12", "$14", "$15", "$24", "$31",  \
1041           DADDI_SCRATCH, "memory");                                     \
1042         __cu_len_r;                                                     \
1043 })
1044
1045 #define __invoke_copy_to_user_eva_generic(to, from, n, func_ptr)        \
1046 ({                                                                      \
1047         register void *__cu_to_r __asm__("$4");                         \
1048         register const void __user *__cu_from_r __asm__("$5");          \
1049         register long __cu_len_r __asm__("$6");                         \
1050                                                                         \
1051         __cu_to_r = (to);                                               \
1052         __cu_from_r = (from);                                           \
1053         __cu_len_r = (n);                                               \
1054         __asm__ __volatile__(                                           \
1055         __MODULE_JAL(func_ptr)                                          \
1056         : "+r" (__cu_to_r), "+r" (__cu_from_r), "+r" (__cu_len_r)       \
1057         :                                                               \
1058         : "$8", "$9", "$10", "$11", "$12", "$14", "$15", "$24", "$31",  \
1059           DADDI_SCRATCH, "memory");                                     \
1060         __cu_len_r;                                                     \
1061 })
1062
1063 /*
1064  * Source or destination address is in userland; we need to go through
1065  * the TLB.
1066  */
1067 #define __invoke_copy_from_user(to, from, n)                            \
1068         __invoke_copy_from_user_eva_generic(to, from, n, __copy_from_user_eva)
1069
1070 #define __invoke_copy_from_user_inatomic(to, from, n)                   \
1071         __invoke_copy_from_user_eva_generic(to, from, n,                \
1072                                             __copy_user_inatomic_eva)
1073
1074 #define __invoke_copy_to_user(to, from, n)                              \
1075         __invoke_copy_to_user_eva_generic(to, from, n, __copy_to_user_eva)
1076
1077 #define ___invoke_copy_in_user(to, from, n)                             \
1078         __invoke_copy_from_user_eva_generic(to, from, n, __copy_in_user_eva)
1079
1080 /*
1081  * Source or destination address is in the kernel; we do not go through
1082  * the TLB.
1083  */
1084 #define __invoke_copy_from_kernel(to, from, n)                          \
1085         __invoke_copy_from_user_eva_generic(to, from, n, __copy_user)
1086
1087 #define __invoke_copy_from_kernel_inatomic(to, from, n)                 \
1088         __invoke_copy_from_user_eva_generic(to, from, n, __copy_user_inatomic)
1089
1090 #define __invoke_copy_to_kernel(to, from, n)                            \
1091         __invoke_copy_to_user_eva_generic(to, from, n, __copy_user)
1092
1093 #define ___invoke_copy_in_kernel(to, from, n)                           \
1094         __invoke_copy_from_user_eva_generic(to, from, n, __copy_user)
1095
1096 #endif /* CONFIG_EVA */
1097
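/*
 * The helpers from here on are shared by the EVA and non-EVA builds: each
 * one asks eva_kernel_access() which flavour of invoker to use, so a single
 * call site works whether the current segment is USER_DS or KERNEL_DS.
 * A minimal, illustrative sketch of how a caller ends up on the kernel
 * path (legacy get_fs()/set_fs() style; some_kernel_int is a hypothetical
 * kernel variable):
 *
 *	mm_segment_t old_fs = get_fs();
 *	int val, err;
 *
 *	set_fs(KERNEL_DS);
 *	err = copy_from_user(&val, (const void __user *)&some_kernel_int,
 *			     sizeof(val));
 *	set_fs(old_fs);
 *
 * While the segment is KERNEL_DS, copy_from_user() below selects
 * __invoke_copy_from_kernel() on an EVA kernel instead of the TLB-mapped
 * user variant.
 */
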
1098 /*
1099  * __copy_from_user: - Copy a block of data from user space, with less checking.
1100  * @to:   Destination address, in kernel space.
1101  * @from: Source address, in user space.
1102  * @n:    Number of bytes to copy.
1103  *
1104  * Context: User context only. This function may sleep if pagefaults are
1105  *          enabled.
1106  *
1107  * Copy data from user space to kernel space.  Caller must check
1108  * the specified block with access_ok() before calling this function.
1109  *
1110  * Returns number of bytes that could not be copied.
1111  * On success, this will be zero.
1112  *
1113  * If some data could not be copied, this function will pad the copied
1114  * data to the requested size using zero bytes.
1115  */
1116 #define __copy_from_user(to, from, n)                                   \
1117 ({                                                                      \
1118         void *__cu_to;                                                  \
1119         const void __user *__cu_from;                                   \
1120         long __cu_len;                                                  \
1121                                                                         \
1122         __cu_to = (to);                                                 \
1123         __cu_from = (from);                                             \
1124         __cu_len = (n);                                                 \
1125         if (eva_kernel_access()) {                                      \
1126                 __cu_len = __invoke_copy_from_kernel(__cu_to,           \
1127                                                      __cu_from,         \
1128                                                      __cu_len);         \
1129         } else {                                                        \
1130                 might_fault();                                          \
1131                 __cu_len = __invoke_copy_from_user(__cu_to, __cu_from,  \
1132                                                    __cu_len);           \
1133         }                                                               \
1134         __cu_len;                                                       \
1135 })
1136
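/*
 * Illustrative use of __copy_from_user() (not part of this header): the
 * caller has already validated the range with access_ok(), so the cheaper
 * unchecked variant can be used.  struct demo_req and ureq are hypothetical
 * names.
 *
 *	struct demo_req req;
 *
 *	if (!access_ok(VERIFY_READ, ureq, sizeof(req)))
 *		return -EFAULT;
 *	if (__copy_from_user(&req, ureq, sizeof(req)))
 *		return -EFAULT;
 */
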
1137 /*
1138  * copy_from_user: - Copy a block of data from user space.
1139  * @to:   Destination address, in kernel space.
1140  * @from: Source address, in user space.
1141  * @n:    Number of bytes to copy.
1142  *
1143  * Context: User context only. This function may sleep if pagefaults are
1144  *          enabled.
1145  *
1146  * Copy data from user space to kernel space.
1147  *
1148  * Returns number of bytes that could not be copied.
1149  * On success, this will be zero.
1150  *
1151  * If some data could not be copied, this function will pad the copied
1152  * data to the requested size using zero bytes.
1153  */
1154 #define copy_from_user(to, from, n)                                     \
1155 ({                                                                      \
1156         void *__cu_to;                                                  \
1157         const void __user *__cu_from;                                   \
1158         long __cu_len;                                                  \
1159                                                                         \
1160         __cu_to = (to);                                                 \
1161         __cu_from = (from);                                             \
1162         __cu_len = (n);                                                 \
1163         if (eva_kernel_access()) {                                      \
1164                 __cu_len = __invoke_copy_from_kernel(__cu_to,           \
1165                                                      __cu_from,         \
1166                                                      __cu_len);         \
1167         } else {                                                        \
1168                 if (access_ok(VERIFY_READ, __cu_from, __cu_len)) {      \
1169                         might_fault();                                  \
1170                         __cu_len = __invoke_copy_from_user(__cu_to,     \
1171                                                            __cu_from,   \
1172                                                            __cu_len);   \
1173                 }                                                       \
1174         }                                                               \
1175         __cu_len;                                                       \
1176 })
1177
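/*
 * Typical use of copy_from_user() (illustrative only): a nonzero return
 * means some bytes could not be copied and is normally translated into
 * -EFAULT.  struct demo_cfg and uarg are hypothetical names.
 *
 *	struct demo_cfg cfg;
 *
 *	if (copy_from_user(&cfg, uarg, sizeof(cfg)))
 *		return -EFAULT;
 */
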
1178 #define __copy_in_user(to, from, n)                                     \
1179 ({                                                                      \
1180         void __user *__cu_to;                                           \
1181         const void __user *__cu_from;                                   \
1182         long __cu_len;                                                  \
1183                                                                         \
1184         __cu_to = (to);                                                 \
1185         __cu_from = (from);                                             \
1186         __cu_len = (n);                                                 \
1187         if (eva_kernel_access()) {                                      \
1188                 __cu_len = ___invoke_copy_in_kernel(__cu_to, __cu_from, \
1189                                                     __cu_len);          \
1190         } else {                                                        \
1191                 might_fault();                                          \
1192                 __cu_len = ___invoke_copy_in_user(__cu_to, __cu_from,   \
1193                                                   __cu_len);            \
1194         }                                                               \
1195         __cu_len;                                                       \
1196 })
1197
1198 #define copy_in_user(to, from, n)                                       \
1199 ({                                                                      \
1200         void __user *__cu_to;                                           \
1201         const void __user *__cu_from;                                   \
1202         long __cu_len;                                                  \
1203                                                                         \
1204         __cu_to = (to);                                                 \
1205         __cu_from = (from);                                             \
1206         __cu_len = (n);                                                 \
1207         if (eva_kernel_access()) {                                      \
1208                 __cu_len = ___invoke_copy_in_kernel(__cu_to, __cu_from, \
1209                                                     __cu_len);          \
1210         } else {                                                        \
1211                 if (likely(access_ok(VERIFY_READ, __cu_from, __cu_len) &&\
1212                            access_ok(VERIFY_WRITE, __cu_to, __cu_len))) {\
1213                         might_fault();                                  \
1214                         __cu_len = ___invoke_copy_in_user(__cu_to,      \
1215                                                           __cu_from,    \
1216                                                           __cu_len);    \
1217                 }                                                       \
1218         }                                                               \
1219         __cu_len;                                                       \
1220 })
1221
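/*
 * copy_in_user() moves data directly between two userspace buffers, e.g.
 * in compat code that repacks a structure without bouncing it through a
 * kernel buffer.  A minimal sketch (udst, usrc and len are hypothetical):
 *
 *	if (copy_in_user(udst, usrc, len))
 *		return -EFAULT;
 */
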
1222 /*
1223  * __clear_user: - Zero a block of memory in user space, with less checking.
1224  * @to:   Destination address, in user space.
1225  * @n:    Number of bytes to zero.
1226  *
1227  * Zero a block of memory in user space.  Caller must check
1228  * the specified block with access_ok() before calling this function.
1229  *
1230  * Returns number of bytes that could not be cleared.
1231  * On success, this will be zero.
1232  */
1233 static inline __kernel_size_t
1234 __clear_user(void __user *addr, __kernel_size_t size)
1235 {
1236         __kernel_size_t res;
1237
1238         if (eva_kernel_access()) {
1239                 __asm__ __volatile__(
1240                         "move\t$4, %1\n\t"
1241                         "move\t$5, $0\n\t"
1242                         "move\t$6, %2\n\t"
1243                         __MODULE_JAL(__bzero_kernel)
1244                         "move\t%0, $6"
1245                         : "=r" (res)
1246                         : "r" (addr), "r" (size)
1247                         : "$4", "$5", "$6", __UA_t0, __UA_t1, "$31");
1248         } else {
1249                 might_fault();
1250                 __asm__ __volatile__(
1251                         "move\t$4, %1\n\t"
1252                         "move\t$5, $0\n\t"
1253                         "move\t$6, %2\n\t"
1254                         __MODULE_JAL(__bzero)
1255                         "move\t%0, $6"
1256                         : "=r" (res)
1257                         : "r" (addr), "r" (size)
1258                         : "$4", "$5", "$6", __UA_t0, __UA_t1, "$31");
1259         }
1260
1261         return res;
1262 }
1263
1264 #define clear_user(addr,n)                                              \
1265 ({                                                                      \
1266         void __user * __cl_addr = (addr);                               \
1267         unsigned long __cl_size = (n);                                  \
1268         if (__cl_size && access_ok(VERIFY_WRITE,                        \
1269                                         __cl_addr, __cl_size))          \
1270                 __cl_size = __clear_user(__cl_addr, __cl_size);         \
1271         __cl_size;                                                      \
1272 })
1273
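/*
 * Typical use of clear_user() (illustrative only): zero the tail of a user
 * buffer that was only partially filled.  ubuf (a char __user *), copied
 * and len are hypothetical names.
 *
 *	if (clear_user(ubuf + copied, len - copied))
 *		return -EFAULT;
 */
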
1274 /*
1275  * __strncpy_from_user: - Copy a NUL-terminated string from userspace, with less checking.
1276  * @dst:   Destination address, in kernel space.  This buffer must be at
1277  *         least @count bytes long.
1278  * @src:   Source address, in user space.
1279  * @count: Maximum number of bytes to copy, including the trailing NUL.
1280  *
1281  * Copies a NUL-terminated string from userspace to kernel space.
1282  * Caller must check the specified block with access_ok() before calling
1283  * this function.
1284  *
1285  * On success, returns the length of the string (not including the trailing
1286  * NUL).
1287  *
1288  * If access to userspace fails, returns -EFAULT (some data may have been
1289  * copied).
1290  *
1291  * If @count is smaller than the length of the string, copies @count bytes
1292  * and returns @count.
1293  */
1294 static inline long
1295 __strncpy_from_user(char *__to, const char __user *__from, long __len)
1296 {
1297         long res;
1298
1299         if (eva_kernel_access()) {
1300                 __asm__ __volatile__(
1301                         "move\t$4, %1\n\t"
1302                         "move\t$5, %2\n\t"
1303                         "move\t$6, %3\n\t"
1304                         __MODULE_JAL(__strncpy_from_kernel_nocheck_asm)
1305                         "move\t%0, $2"
1306                         : "=r" (res)
1307                         : "r" (__to), "r" (__from), "r" (__len)
1308                         : "$2", "$3", "$4", "$5", "$6", __UA_t0, "$31", "memory");
1309         } else {
1310                 might_fault();
1311                 __asm__ __volatile__(
1312                         "move\t$4, %1\n\t"
1313                         "move\t$5, %2\n\t"
1314                         "move\t$6, %3\n\t"
1315                         __MODULE_JAL(__strncpy_from_user_nocheck_asm)
1316                         "move\t%0, $2"
1317                         : "=r" (res)
1318                         : "r" (__to), "r" (__from), "r" (__len)
1319                         : "$2", "$3", "$4", "$5", "$6", __UA_t0, "$31", "memory");
1320         }
1321
1322         return res;
1323 }
1324
1325 /*
1326  * strncpy_from_user: - Copy a NUL-terminated string from userspace.
1327  * @dst:   Destination address, in kernel space.  This buffer must be at
1328  *         least @count bytes long.
1329  * @src:   Source address, in user space.
1330  * @count: Maximum number of bytes to copy, including the trailing NUL.
1331  *
1332  * Copies a NUL-terminated string from userspace to kernel space.
1333  *
1334  * On success, returns the length of the string (not including the trailing
1335  * NUL).
1336  *
1337  * If access to userspace fails, returns -EFAULT (some data may have been
1338  * copied).
1339  *
1340  * If @count is smaller than the length of the string, copies @count bytes
1341  * and returns @count.
1342  */
1343 static inline long
1344 strncpy_from_user(char *__to, const char __user *__from, long __len)
1345 {
1346         long res;
1347
1348         if (eva_kernel_access()) {
1349                 __asm__ __volatile__(
1350                         "move\t$4, %1\n\t"
1351                         "move\t$5, %2\n\t"
1352                         "move\t$6, %3\n\t"
1353                         __MODULE_JAL(__strncpy_from_kernel_asm)
1354                         "move\t%0, $2"
1355                         : "=r" (res)
1356                         : "r" (__to), "r" (__from), "r" (__len)
1357                         : "$2", "$3", "$4", "$5", "$6", __UA_t0, "$31", "memory");
1358         } else {
1359                 might_fault();
1360                 __asm__ __volatile__(
1361                         "move\t$4, %1\n\t"
1362                         "move\t$5, %2\n\t"
1363                         "move\t$6, %3\n\t"
1364                         __MODULE_JAL(__strncpy_from_user_asm)
1365                         "move\t%0, $2"
1366                         : "=r" (res)
1367                         : "r" (__to), "r" (__from), "r" (__len)
1368                         : "$2", "$3", "$4", "$5", "$6", __UA_t0, "$31", "memory");
1369         }
1370
1371         return res;
1372 }
1373
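/*
 * Illustrative use of strncpy_from_user(): a negative return means the
 * user pointer faulted, while a return equal to the buffer size means no
 * NUL was found within it (so the copy may be unterminated).  uname is a
 * hypothetical user pointer.
 *
 *	char name[32];
 *	long len;
 *
 *	len = strncpy_from_user(name, uname, sizeof(name));
 *	if (len < 0)
 *		return len;
 *	if (len == sizeof(name))
 *		return -ENAMETOOLONG;
 */
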
1374 /*
1375  * strlen_user: - Get the size of a string in user space.
1376  * @str: The string to measure.
1377  *
1378  * Context: User context only. This function may sleep if pagefaults are
1379  *          enabled.
1380  *
1381  * Get the size of a NUL-terminated string in user space.
1382  *
1383  * Returns the size of the string INCLUDING the terminating NUL.
1384  * On exception, returns 0.
1385  *
1386  * If there is a limit on the length of a valid string, you may wish to
1387  * consider using strnlen_user() instead.
1388  */
1389 static inline long strlen_user(const char __user *s)
1390 {
1391         long res;
1392
1393         if (eva_kernel_access()) {
1394                 __asm__ __volatile__(
1395                         "move\t$4, %1\n\t"
1396                         __MODULE_JAL(__strlen_kernel_asm)
1397                         "move\t%0, $2"
1398                         : "=r" (res)
1399                         : "r" (s)
1400                         : "$2", "$4", __UA_t0, "$31");
1401         } else {
1402                 might_fault();
1403                 __asm__ __volatile__(
1404                         "move\t$4, %1\n\t"
1405                         __MODULE_JAL(__strlen_user_asm)
1406                         "move\t%0, $2"
1407                         : "=r" (res)
1408                         : "r" (s)
1409                         : "$2", "$4", __UA_t0, "$31");
1410         }
1411
1412         return res;
1413 }
1414
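/*
 * Illustrative use of strlen_user(): the result includes the terminating
 * NUL, and 0 signals a fault while scanning.  When a sane upper bound on
 * the string length is known, strnlen_user() below is usually the better
 * choice.  ustr is a hypothetical user pointer.
 *
 *	long len = strlen_user(ustr);
 *
 *	if (len == 0)
 *		return -EFAULT;
 */
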
1415 /* Returns: 0 if bad, string length+1 (memory size) of string if ok */
1416 static inline long __strnlen_user(const char __user *s, long n)
1417 {
1418         long res;
1419
1420         if (eva_kernel_access()) {
1421                 __asm__ __volatile__(
1422                         "move\t$4, %1\n\t"
1423                         "move\t$5, %2\n\t"
1424                         __MODULE_JAL(__strnlen_kernel_nocheck_asm)
1425                         "move\t%0, $2"
1426                         : "=r" (res)
1427                         : "r" (s), "r" (n)
1428                         : "$2", "$4", "$5", __UA_t0, "$31");
1429         } else {
1430                 might_fault();
1431                 __asm__ __volatile__(
1432                         "move\t$4, %1\n\t"
1433                         "move\t$5, %2\n\t"
1434                         __MODULE_JAL(__strnlen_user_nocheck_asm)
1435                         "move\t%0, $2"
1436                         : "=r" (res)
1437                         : "r" (s), "r" (n)
1438                         : "$2", "$4", "$5", __UA_t0, "$31");
1439         }
1440
1441         return res;
1442 }
1443
1444 /*
1445  * strnlen_user: - Get the size of a string in user space.
1446  * @str: The string to measure.
1447  *
1448  * Context: User context only. This function may sleep if pagefaults are
1449  *          enabled.
1450  *
1451  * Get the size of a NUL-terminated string in user space.
1452  *
1453  * Returns the size of the string INCLUDING the terminating NUL.
1454  * On exception, returns 0.
1455  * If the string is too long, returns a value greater than @n.
1456  */
1457 static inline long strnlen_user(const char __user *s, long n)
1458 {
1459         long res;
1460
1461         might_fault();
1462         if (eva_kernel_access()) {
1463                 __asm__ __volatile__(
1464                         "move\t$4, %1\n\t"
1465                         "move\t$5, %2\n\t"
1466                         __MODULE_JAL(__strnlen_kernel_asm)
1467                         "move\t%0, $2"
1468                         : "=r" (res)
1469                         : "r" (s), "r" (n)
1470                         : "$2", "$4", "$5", __UA_t0, "$31");
1471         } else {
1472                 __asm__ __volatile__(
1473                         "move\t$4, %1\n\t"
1474                         "move\t$5, %2\n\t"
1475                         __MODULE_JAL(__strnlen_user_asm)
1476                         "move\t%0, $2"
1477                         : "=r" (res)
1478                         : "r" (s), "r" (n)
1479                         : "$2", "$4", "$5", __UA_t0, "$31");
1480         }
1481
1482         return res;
1483 }
1484
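/*
 * Illustrative use of strnlen_user(): 0 means the scan faulted, a value
 * greater than the limit means no NUL was found within it, and otherwise
 * the result counts the string plus its terminating NUL.  ustr and
 * DEMO_MAX are hypothetical names.
 *
 *	long len = strnlen_user(ustr, DEMO_MAX);
 *
 *	if (len == 0)
 *		return -EFAULT;
 *	if (len > DEMO_MAX)
 *		return -EINVAL;
 */
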
1485 struct exception_table_entry
1486 {
1487         unsigned long insn;
1488         unsigned long nextinsn;
1489 };
1490
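/*
 * Each entry pairs the address of an instruction that is allowed to fault
 * on a bad user pointer (insn) with the address execution should resume at
 * (nextinsn).  When such a fault happens, fixup_exception() looks the
 * faulting address up in this table and, if it finds a match, redirects the
 * saved program counter to nextinsn so the access helper can return an
 * error instead of the kernel oopsing.  The user access routines record
 * their entries in the __ex_table section; roughly (illustrative sketch
 * only):
 *
 *	1:	lw	%0, 0(%2)		# instruction that may fault
 *		.section __ex_table, "a"
 *		.word	1b, 2f			# .dword on 64-bit kernels
 *		.previous
 *	2:	...				# fixup: report -EFAULT
 */
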
1491 extern int fixup_exception(struct pt_regs *regs);
1492
1493 #endif /* _ASM_UACCESS_H */