kvmfornfv.git: kernel/arch/sparc/include/asm/uaccess_64.h (kernel 4.4.50-rt62)
#ifndef _ASM_UACCESS_H
#define _ASM_UACCESS_H

/*
 * User space memory access functions
 */

#ifdef __KERNEL__
#include <linux/errno.h>
#include <linux/compiler.h>
#include <linux/string.h>
#include <linux/thread_info.h>
#include <asm/asi.h>
#include <asm/spitfire.h>
#include <asm-generic/uaccess-unaligned.h>
#endif

#ifndef __ASSEMBLY__

#include <asm/processor.h>

/*
 * Sparc64 is segmented, though more like the M68K than the I386.
 * We use the secondary ASI to address user memory, which references a
 * completely different VM map, thus there is zero chance of the user
 * doing something queer and tricking us into poking kernel memory.
 *
 * What is left here is basically what is needed for the other parts of
 * the kernel that expect to be able to manipulate, erum, "segments".
 * Or perhaps more properly, permissions.
 *
 * "For historical reasons, these macros are grossly misnamed." -Linus
 */

#define KERNEL_DS   ((mm_segment_t) { ASI_P })
#define USER_DS     ((mm_segment_t) { ASI_AIUS })       /* har har har */

#define VERIFY_READ     0
#define VERIFY_WRITE    1

#define get_fs() ((mm_segment_t){(current_thread_info()->current_ds)})
#define get_ds() (KERNEL_DS)

#define segment_eq(a, b)  ((a).seg == (b).seg)

#define set_fs(val)                                                             \
do {                                                                            \
        current_thread_info()->current_ds = (val).seg;                          \
        __asm__ __volatile__ ("wr %%g0, %0, %%asi" : : "r" ((val).seg));        \
} while(0)
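
/*
 * A minimal sketch of the historical set_fs() pattern (illustrative
 * only; not part of this header).  Code that wants to hand a kernel
 * buffer to a routine expecting a __user pointer widens the address
 * limit around the call and restores it afterwards:
 *
 *        mm_segment_t old_fs = get_fs();
 *
 *        set_fs(KERNEL_DS);
 *        ret = some_read_routine((char __user *)kbuf, len);  // hypothetical callee
 *        set_fs(old_fs);
 *
 * On sparc64 this works by rewriting the %asi register between ASI_P
 * (kernel) and ASI_AIUS (user), so the ASI-qualified loads and stores
 * below hit the right address space.
 */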

/*
 * Test whether a block of memory is a valid user space address.
 * Returns false if the range is valid, true otherwise.
 */
static inline bool __chk_range_not_ok(unsigned long addr, unsigned long size, unsigned long limit)
{
        if (__builtin_constant_p(size))
                return addr > limit - size;

        addr += size;
        if (addr < size)
                return true;

        return addr > limit;
}

#define __range_not_ok(addr, size, limit)                               \
({                                                                      \
        __chk_user_ptr(addr);                                           \
        __chk_range_not_ok((unsigned long __force)(addr), size, limit); \
})
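
/*
 * Worked example (illustrative): the "addr += size; if (addr < size)"
 * test catches wraparound.  With addr = 0xfffffffffffffff0 and
 * size = 0x20, addr + size wraps to 0x10, which is smaller than size,
 * so the range is rejected even though the final "addr > limit"
 * comparison on the wrapped sum would have passed.
 */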

static inline int __access_ok(const void __user * addr, unsigned long size)
{
        return 1;
}

static inline int access_ok(int type, const void __user * addr, unsigned long size)
{
        return 1;
}
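
/*
 * access_ok() is unconditionally true here because, as noted above,
 * user accesses go through the secondary ASI and cannot reach kernel
 * memory, so there is no address range to police.  Portable callers
 * still use the usual idiom (sketch; "ubuf" and "len" are hypothetical
 * ioctl arguments):
 *
 *        if (!access_ok(VERIFY_WRITE, ubuf, len))
 *                return -EFAULT;
 */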

/*
 * The exception table consists of pairs of addresses: the first is the
 * address of an instruction that is allowed to fault, and the second is
 * the address at which the program should continue.  No registers are
 * modified, so it is entirely up to the continuation code to figure out
 * what to do.
 *
 * All the routines below use bits of fixup code that are out of line
 * with the main instruction path.  This means when everything is well,
 * we don't even have to jump over them.  Further, they do not intrude
 * on our cache or tlb entries.
 */

struct exception_table_entry {
        unsigned int insn, fixup;
};
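
/*
 * A simplified sketch of how a trap handler consumes this table (the
 * real kernel goes through search_exception_tables(); the linear scan
 * below is illustrative only).  Entries hold 32-bit kernel text
 * addresses, which is why one .word per address suffices:
 *
 *        static unsigned int search_fixup(unsigned long fault_pc,
 *                                         const struct exception_table_entry *e,
 *                                         int n)
 *        {
 *                int i;
 *
 *                for (i = 0; i < n; i++)
 *                        if (e[i].insn == (unsigned int)fault_pc)
 *                                return e[i].fixup;
 *                return 0;        // no entry: the fault is a genuine bug
 *        }
 *
 * When a fixup is found, the handler resumes execution there instead
 * of killing the task.
 */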

void __retl_efault(void);

/* Uh, these should become the main single-value transfer routines..
 * They automatically use the right size if we just have the right
 * pointer type..
 *
 * This gets kind of ugly. We want to return _two_ values in "get_user()"
 * and yet we don't want to do any pointers, because that is too much
 * of a performance impact. Thus we have a few rather ugly macros here,
 * and hide all the ugliness from the user.
 */
#define put_user(x, ptr) ({ \
        unsigned long __pu_addr = (unsigned long)(ptr); \
        __chk_user_ptr(ptr); \
        __put_user_nocheck((__typeof__(*(ptr)))(x), __pu_addr, sizeof(*(ptr)));\
})

#define get_user(x, ptr) ({ \
        unsigned long __gu_addr = (unsigned long)(ptr); \
        __chk_user_ptr(ptr); \
        __get_user_nocheck((x), __gu_addr, sizeof(*(ptr)), __typeof__(*(ptr)));\
})

#define __put_user(x, ptr) put_user(x, ptr)
#define __get_user(x, ptr) get_user(x, ptr)
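
/*
 * Typical usage (sketch; "arg" stands in for a hypothetical __user
 * pointer from, say, an ioctl).  Both macros return 0 on success and
 * -EFAULT on a faulting access; the transferred value travels through
 * the "x" argument rather than the return value:
 *
 *        u32 val;
 *
 *        if (get_user(val, (u32 __user *)arg))
 *                return -EFAULT;
 *        val |= 0x1;        // set an illustrative flag bit
 *        if (put_user(val, (u32 __user *)arg))
 *                return -EFAULT;
 */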

struct __large_struct { unsigned long buf[100]; };
#define __m(x) ((struct __large_struct *)(x))

#define __put_user_nocheck(data, addr, size) ({                 \
        register int __pu_ret;                                  \
        switch (size) {                                         \
        case 1: __put_user_asm(data, b, addr, __pu_ret); break; \
        case 2: __put_user_asm(data, h, addr, __pu_ret); break; \
        case 4: __put_user_asm(data, w, addr, __pu_ret); break; \
        case 8: __put_user_asm(data, x, addr, __pu_ret); break; \
        default: __pu_ret = __put_user_bad(); break;            \
        }                                                       \
        __pu_ret;                                               \
})

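/*
 * Structure of the inline asm in the two *_asm macros below: label 1
 * is the single instruction allowed to fault (an ASI-qualified store
 * or load), label 2 is the continuation point, and label 3 is the
 * out-of-line fixup that sets the return value to -EFAULT (and, for
 * loads, clears the destination register) before jumping back to 2.
 * The __ex_table entry pairs 1b with 3b so the fault handler knows
 * where to resume.
 */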
#define __put_user_asm(x, size, addr, ret)                              \
__asm__ __volatile__(                                                   \
                "/* Put user asm, inline. */\n"                         \
        "1:\t"  "st"#size "a %1, [%2] %%asi\n\t"                        \
                "clr    %0\n"                                           \
        "2:\n\n\t"                                                      \
                ".section .fixup,#alloc,#execinstr\n\t"                 \
                ".align 4\n"                                            \
        "3:\n\t"                                                        \
                "sethi  %%hi(2b), %0\n\t"                               \
                "jmpl   %0 + %%lo(2b), %%g0\n\t"                        \
                " mov   %3, %0\n\n\t"                                   \
                ".previous\n\t"                                         \
                ".section __ex_table,\"a\"\n\t"                         \
                ".align 4\n\t"                                          \
                ".word  1b, 3b\n\t"                                     \
                ".previous\n\n\t"                                       \
               : "=r" (ret) : "r" (x), "r" (__m(addr)),                 \
                 "i" (-EFAULT))

int __put_user_bad(void);

#define __get_user_nocheck(data, addr, size, type) ({                        \
        register int __gu_ret;                                               \
        register unsigned long __gu_val;                                     \
        switch (size) {                                                      \
                case 1: __get_user_asm(__gu_val, ub, addr, __gu_ret); break; \
                case 2: __get_user_asm(__gu_val, uh, addr, __gu_ret); break; \
                case 4: __get_user_asm(__gu_val, uw, addr, __gu_ret); break; \
                case 8: __get_user_asm(__gu_val, x, addr, __gu_ret); break;  \
                default:                                                     \
                        __gu_val = 0;                                        \
                        __gu_ret = __get_user_bad();                         \
                        break;                                               \
        }                                                                    \
        data = (__force type) __gu_val;                                      \
        __gu_ret;                                                            \
})

#define __get_user_asm(x, size, addr, ret)                              \
__asm__ __volatile__(                                                   \
                "/* Get user asm, inline. */\n"                         \
        "1:\t"  "ld"#size "a [%2] %%asi, %1\n\t"                        \
                "clr    %0\n"                                           \
        "2:\n\n\t"                                                      \
                ".section .fixup,#alloc,#execinstr\n\t"                 \
                ".align 4\n"                                            \
        "3:\n\t"                                                        \
                "sethi  %%hi(2b), %0\n\t"                               \
                "clr    %1\n\t"                                         \
                "jmpl   %0 + %%lo(2b), %%g0\n\t"                        \
                " mov   %3, %0\n\n\t"                                   \
                ".previous\n\t"                                         \
                ".section __ex_table,\"a\"\n\t"                         \
                ".align 4\n\t"                                          \
                ".word  1b, 3b\n\n\t"                                   \
                ".previous\n\t"                                         \
               : "=r" (ret), "=r" (x) : "r" (__m(addr)),                \
                 "i" (-EFAULT))

int __get_user_bad(void);

unsigned long __must_check ___copy_from_user(void *to,
                                             const void __user *from,
                                             unsigned long size);
static inline unsigned long __must_check
copy_from_user(void *to, const void __user *from, unsigned long size)
{
        return ___copy_from_user(to, from, size);
}
#define __copy_from_user copy_from_user

unsigned long __must_check ___copy_to_user(void __user *to,
                                           const void *from,
                                           unsigned long size);
static inline unsigned long __must_check
copy_to_user(void __user *to, const void *from, unsigned long size)
{
        return ___copy_to_user(to, from, size);
}
#define __copy_to_user copy_to_user
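
/*
 * Usage sketch (illustrative; "struct req", "ubuf" and friends are
 * hypothetical).  Unlike get_user()/put_user(), the bulk copy routines
 * return the number of bytes that could NOT be copied, so zero means
 * complete success:
 *
 *        struct req kreq;
 *
 *        if (copy_from_user(&kreq, ubuf, sizeof(kreq)))
 *                return -EFAULT;
 *        // ... operate on kreq ...
 *        if (copy_to_user(ubuf, &kreq, sizeof(kreq)))
 *                return -EFAULT;
 */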

unsigned long __must_check ___copy_in_user(void __user *to,
                                           const void __user *from,
                                           unsigned long size);
static inline unsigned long __must_check
copy_in_user(void __user *to, void __user *from, unsigned long size)
{
        return ___copy_in_user(to, from, size);
}
#define __copy_in_user copy_in_user

unsigned long __must_check __clear_user(void __user *, unsigned long);

#define clear_user __clear_user

__must_check long strlen_user(const char __user *str);
__must_check long strnlen_user(const char __user *str, long n);
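
/*
 * Sketch of the string/clear helpers (illustrative; "ustr", "ubuf" and
 * "size" are hypothetical).  strnlen_user() returns the string length
 * including the terminating NUL, or 0 on a faulting access;
 * clear_user() returns the number of bytes it failed to zero:
 *
 *        long len = strnlen_user(ustr, PATH_MAX);
 *
 *        if (len == 0)
 *                return -EFAULT;
 *        if (clear_user(ubuf, size))
 *                return -EFAULT;
 */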

#define __copy_to_user_inatomic __copy_to_user
#define __copy_from_user_inatomic __copy_from_user

struct pt_regs;
unsigned long compute_effective_address(struct pt_regs *,
                                        unsigned int insn,
                                        unsigned int rd);

#endif  /* __ASSEMBLY__ */

#endif /* _ASM_UACCESS_H */