#ifndef _ASM_X86_UACCESS_32_H
#define _ASM_X86_UACCESS_32_H

/*
 * User space memory access functions
 */
#include <linux/errno.h>
#include <linux/thread_info.h>
#include <linux/string.h>
#include <asm/asm.h>
#include <asm/page.h>

unsigned long __must_check __copy_to_user_ll
        (void __user *to, const void *from, unsigned long n);
unsigned long __must_check __copy_from_user_ll
        (void *to, const void __user *from, unsigned long n);
unsigned long __must_check __copy_from_user_ll_nozero
        (void *to, const void __user *from, unsigned long n);
unsigned long __must_check __copy_from_user_ll_nocache
        (void *to, const void __user *from, unsigned long n);
unsigned long __must_check __copy_from_user_ll_nocache_nozero
        (void *to, const void __user *from, unsigned long n);

/**
 * __copy_to_user_inatomic: - Copy a block of data into user space, with less checking.
 * @to:   Destination address, in user space.
 * @from: Source address, in kernel space.
 * @n:    Number of bytes to copy.
 *
 * Context: User context only.
 *
 * Copy data from kernel space to user space.  Caller must check
 * the specified block with access_ok() before calling this function.
 * The caller should also make sure the user space address is pinned,
 * so that we don't take a page fault and sleep.
 *
 * Here we special-case 1, 2 and 4-byte copy_*_user invocations.  On a fault
 * we return the initial request size (1, 2 or 4), as copy_*_user should do.
 * If a store crosses a page boundary and gets a fault, the x86 will not write
 * anything, so this is accurate.
 */

static __always_inline unsigned long __must_check
__copy_to_user_inatomic(void __user *to, const void *from, unsigned long n)
{
        if (__builtin_constant_p(n)) {
                unsigned long ret;

                switch (n) {
                case 1:
                        __put_user_size(*(u8 *)from, (u8 __user *)to,
                                        1, ret, 1);
                        return ret;
                case 2:
                        __put_user_size(*(u16 *)from, (u16 __user *)to,
                                        2, ret, 2);
                        return ret;
                case 4:
                        __put_user_size(*(u32 *)from, (u32 __user *)to,
                                        4, ret, 4);
                        return ret;
                }
        }
        return __copy_to_user_ll(to, from, n);
}

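/*
 * Note (not part of the original header): the _inatomic variant above is
 * meant for callers that have already disabled page faults (e.g. under
 * pagefault_disable() or while holding a kmap_atomic() mapping) and that
 * handle a short copy themselves; unlike __copy_to_user() below it never
 * calls might_fault().
 */
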
/**
 * __copy_to_user: - Copy a block of data into user space, with less checking.
 * @to:   Destination address, in user space.
 * @from: Source address, in kernel space.
 * @n:    Number of bytes to copy.
 *
 * Context: User context only.  This function may sleep if pagefaults are
 *          enabled.
 *
 * Copy data from kernel space to user space.  Caller must check
 * the specified block with access_ok() before calling this function.
 *
 * Returns number of bytes that could not be copied.
 * On success, this will be zero.
 */
static __always_inline unsigned long __must_check
__copy_to_user(void __user *to, const void *from, unsigned long n)
{
        might_fault();
        return __copy_to_user_inatomic(to, from, n);
}

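/*
 * Illustrative usage sketch (not part of the original header): the helper
 * name and its parameters are made up, but the calling pattern follows the
 * kerneldoc above -- validate the whole user range with access_ok() first
 * (the three-argument form matches this header's vintage), then do the
 * unchecked copy and treat a non-zero return as -EFAULT.  Kept under
 * "#if 0" so it is never compiled.
 */
#if 0
static long example_put_u32(void __user *ubuf, u32 val)
{
        if (!access_ok(VERIFY_WRITE, ubuf, sizeof(val)))
                return -EFAULT;
        if (__copy_to_user(ubuf, &val, sizeof(val)))
                return -EFAULT;
        return 0;
}
#endif
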
static __always_inline unsigned long
__copy_from_user_inatomic(void *to, const void __user *from, unsigned long n)
{
        /* Avoid zeroing the tail if the copy fails.
         * If 'n' is constant and 1, 2, or 4, we do still zero on a failure,
         * but as the zeroing behaviour is only significant when n is not
         * constant, that shouldn't be a problem.
         */
        if (__builtin_constant_p(n)) {
                unsigned long ret;

                switch (n) {
                case 1:
                        __get_user_size(*(u8 *)to, from, 1, ret, 1);
                        return ret;
                case 2:
                        __get_user_size(*(u16 *)to, from, 2, ret, 2);
                        return ret;
                case 4:
                        __get_user_size(*(u32 *)to, from, 4, ret, 4);
                        return ret;
                }
        }
        return __copy_from_user_ll_nozero(to, from, n);
}

/**
 * __copy_from_user: - Copy a block of data from user space, with less checking.
 * @to:   Destination address, in kernel space.
 * @from: Source address, in user space.
 * @n:    Number of bytes to copy.
 *
 * Context: User context only.  This function may sleep if pagefaults are
 *          enabled.
 *
 * Copy data from user space to kernel space.  Caller must check
 * the specified block with access_ok() before calling this function.
 *
 * Returns number of bytes that could not be copied.
 * On success, this will be zero.
 *
 * If some data could not be copied, this function will pad the copied
 * data to the requested size using zero bytes.
 *
 * An alternate version - __copy_from_user_inatomic() - may be called from
 * atomic context and will fail rather than sleep.  In this case the
 * uncopied bytes will *NOT* be padded with zeros.  See fs/filemap.h
 * for explanation of why this is needed.
 */
static __always_inline unsigned long
__copy_from_user(void *to, const void __user *from, unsigned long n)
{
        might_fault();
        if (__builtin_constant_p(n)) {
                unsigned long ret;

                switch (n) {
                case 1:
                        __get_user_size(*(u8 *)to, from, 1, ret, 1);
                        return ret;
                case 2:
                        __get_user_size(*(u16 *)to, from, 2, ret, 2);
                        return ret;
                case 4:
                        __get_user_size(*(u32 *)to, from, 4, ret, 4);
                        return ret;
                }
        }
        return __copy_from_user_ll(to, from, n);
}

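/*
 * Illustrative usage sketch (not part of the original header): a
 * hypothetical helper that reads a fixed-size structure from user space.
 * As above, the caller is responsible for the access_ok() check; a
 * non-zero return from __copy_from_user() means some bytes were left
 * uncopied and, per the kerneldoc, the tail of the kernel buffer has been
 * zero-filled.  Never compiled ("#if 0").
 */
#if 0
struct example_req {
        u32 flags;
        u32 len;
};

static long example_get_req(struct example_req *req,
                            const void __user *ubuf)
{
        if (!access_ok(VERIFY_READ, ubuf, sizeof(*req)))
                return -EFAULT;
        if (__copy_from_user(req, ubuf, sizeof(*req)))
                return -EFAULT;
        return 0;
}
#endif
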
static __always_inline unsigned long __copy_from_user_nocache(void *to,
                                const void __user *from, unsigned long n)
{
        might_fault();
        if (__builtin_constant_p(n)) {
                unsigned long ret;

                switch (n) {
                case 1:
                        __get_user_size(*(u8 *)to, from, 1, ret, 1);
                        return ret;
                case 2:
                        __get_user_size(*(u16 *)to, from, 2, ret, 2);
                        return ret;
                case 4:
                        __get_user_size(*(u32 *)to, from, 4, ret, 4);
                        return ret;
                }
        }
        return __copy_from_user_ll_nocache(to, from, n);
}

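/*
 * Note (not part of the original header): the _nocache helpers route the
 * copy through __copy_from_user_ll_nocache*(), which may use non-temporal
 * stores for larger copies so that data written once and not re-read soon
 * does not displace useful cache lines.  The _inatomic_nocache variant
 * below additionally skips might_fault() and does not zero the uncopied
 * tail on a fault.
 */
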
static __always_inline unsigned long
__copy_from_user_inatomic_nocache(void *to, const void __user *from,
                                  unsigned long n)
{
        return __copy_from_user_ll_nocache_nozero(to, from, n);
}

#endif /* _ASM_X86_UACCESS_32_H */