These changes are the raw update to linux-4.4.6-rt14. Kernel sources
[kvmfornfv.git] kernel/arch/mips/include/asm/uaccess.h
index 9722357..095ecaf 100644
@@ -78,6 +78,21 @@ extern u64 __ua_limit;
 
 #define segment_eq(a, b)       ((a).seg == (b).seg)
 
+/*
+ * eva_kernel_access() - determine whether this is a kernel memory access on an EVA system
+ *
+ * Determines whether memory accesses should be performed to kernel memory
+ * on a system using Extended Virtual Addressing (EVA).
+ *
+ * Return: true if this is a kernel memory access on an EVA system, else false.
+ */
+static inline bool eva_kernel_access(void)
+{
+       if (!config_enabled(CONFIG_EVA))
+               return false;
+
+       return segment_eq(get_fs(), get_ds());
+}
 
 /*
  * Is an address valid? This does a straightforward calculation rather
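
The hunks that follow mechanically replace the open-coded segment comparison with this helper. A minimal sketch of the before/after pattern (do_kernel_access() is a placeholder for illustration, not a kernel function):

    /* Before: the segment comparison is evaluated on every kernel,
     * EVA or not. */
    if (segment_eq(get_fs(), get_ds()))
            do_kernel_access();

    /* After: config_enabled(CONFIG_EVA) is a compile-time constant,
     * so on non-EVA kernels the branch folds away and only the
     * user-access path remains. */
    if (eva_kernel_access())
            do_kernel_access();
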
@@ -286,7 +301,7 @@ do {                                                                        \
 ({                                                                     \
        int __gu_err;                                                   \
                                                                        \
-       if (segment_eq(get_fs(), get_ds())) {                           \
+       if (eva_kernel_access()) {                                      \
                __get_kernel_common((x), size, ptr);                    \
        } else {                                                        \
                __chk_user_ptr(ptr);                                    \
@@ -302,7 +317,7 @@ do {                                                                        \
                                                                        \
        might_fault();                                                  \
        if (likely(access_ok(VERIFY_READ,  __gu_ptr, size))) {          \
-               if (segment_eq(get_fs(), get_ds()))                     \
+               if (eva_kernel_access())                                \
                        __get_kernel_common((x), size, __gu_ptr);       \
                else                                                    \
                        __get_user_common((x), size, __gu_ptr);         \
@@ -427,7 +442,7 @@ do {                                                                        \
        int __pu_err = 0;                                               \
                                                                        \
        __pu_val = (x);                                                 \
-       if (segment_eq(get_fs(), get_ds())) {                           \
+       if (eva_kernel_access()) {                                      \
                __put_kernel_common(ptr, size);                         \
        } else {                                                        \
                __chk_user_ptr(ptr);                                    \
@@ -444,7 +459,7 @@ do {                                                                        \
                                                                        \
        might_fault();                                                  \
        if (likely(access_ok(VERIFY_WRITE,  __pu_addr, size))) {        \
-               if (segment_eq(get_fs(), get_ds()))                     \
+               if (eva_kernel_access())                                \
                        __put_kernel_common(__pu_addr, size);           \
                else                                                    \
                        __put_user_common(__pu_addr, size);             \
@@ -584,7 +599,7 @@ extern void __put_user_unknown(void);
  * On error, the variable @x is set to zero.
  */
 #define __get_user_unaligned(x,ptr) \
-       __get_user__unalignednocheck((x),(ptr),sizeof(*(ptr)))
+       __get_user_unaligned_nocheck((x),(ptr),sizeof(*(ptr)))
 
 /*
  * Yuck.  We need two variants, one for 64bit operation and one
@@ -605,8 +620,8 @@ extern void __get_user_unaligned_unknown(void);
 do {                                                                   \
        switch (size) {                                                 \
        case 1: __get_data_asm(val, "lb", ptr); break;                  \
-       case 2: __get_user_unaligned_asm(val, "ulh", ptr); break;       \
-       case 4: __get_user_unaligned_asm(val, "ulw", ptr); break;       \
+       case 2: __get_data_unaligned_asm(val, "ulh", ptr); break;       \
+       case 4: __get_data_unaligned_asm(val, "ulw", ptr); break;       \
        case 8: __GET_USER_UNALIGNED_DW(val, ptr); break;               \
        default: __get_user_unaligned_unknown(); break;                 \
        }                                                               \
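
The two hunks above are straight bug fixes rather than part of the eva_kernel_access() conversion: the misspelled __get_user__unalignednocheck could never have resolved, and the 2- and 4-byte cases named an asm helper that does not match the __get_data_* naming already used by the 1-byte case in this file. A hypothetical caller, just to show the repaired entry point (uptr and the error handling are illustrative):

    u32 val;

    /* uptr need not be 4-byte aligned; the "ulw" variant performs
     * the unaligned load. */
    if (__get_user_unaligned(val, (u32 __user *)uptr))
            return -EFAULT;
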
@@ -843,7 +858,7 @@ extern size_t __copy_user(void *__to, const void *__from, size_t __n);
        __cu_from = (from);                                             \
        __cu_len = (n);                                                 \
        might_fault();                                                  \
-       if (segment_eq(get_fs(), get_ds()))                             \
+       if (eva_kernel_access())                                        \
                __cu_len = __invoke_copy_to_kernel(__cu_to, __cu_from,  \
                                                   __cu_len);           \
        else                                                            \
@@ -863,7 +878,7 @@ extern size_t __copy_user_inatomic(void *__to, const void *__from, size_t __n);
        __cu_to = (to);                                                 \
        __cu_from = (from);                                             \
        __cu_len = (n);                                                 \
-       if (segment_eq(get_fs(), get_ds()))                             \
+       if (eva_kernel_access())                                        \
                __cu_len = __invoke_copy_to_kernel(__cu_to, __cu_from,  \
                                                   __cu_len);           \
        else                                                            \
@@ -881,7 +896,7 @@ extern size_t __copy_user_inatomic(void *__to, const void *__from, size_t __n);
        __cu_to = (to);                                                 \
        __cu_from = (from);                                             \
        __cu_len = (n);                                                 \
-       if (segment_eq(get_fs(), get_ds()))                             \
+       if (eva_kernel_access())                                        \
                __cu_len = __invoke_copy_from_kernel_inatomic(__cu_to,  \
                                                              __cu_from,\
                                                              __cu_len);\
@@ -915,7 +930,7 @@ extern size_t __copy_user_inatomic(void *__to, const void *__from, size_t __n);
        __cu_to = (to);                                                 \
        __cu_from = (from);                                             \
        __cu_len = (n);                                                 \
-       if (segment_eq(get_fs(), get_ds())) {                           \
+       if (eva_kernel_access()) {                                      \
                __cu_len = __invoke_copy_to_kernel(__cu_to,             \
                                                   __cu_from,           \
                                                   __cu_len);           \
@@ -1107,9 +1122,15 @@ extern size_t __copy_in_user_eva(void *__to, const void *__from, size_t __n);
        __cu_to = (to);                                                 \
        __cu_from = (from);                                             \
        __cu_len = (n);                                                 \
-       might_fault();                                                  \
-       __cu_len = __invoke_copy_from_user(__cu_to, __cu_from,          \
-                                          __cu_len);                   \
+       if (eva_kernel_access()) {                                      \
+               __cu_len = __invoke_copy_from_kernel(__cu_to,           \
+                                                    __cu_from,         \
+                                                    __cu_len);         \
+       } else {                                                        \
+               might_fault();                                          \
+               __cu_len = __invoke_copy_from_user(__cu_to, __cu_from,  \
+                                                  __cu_len);           \
+       }                                                               \
        __cu_len;                                                       \
 })
 
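Unlike the neighbouring hunks, this one changes behaviour: __copy_from_user() previously went through __invoke_copy_from_user() unconditionally, which on an EVA kernel issues user-mode loads and so cannot read kernel addresses. With the kernel-access branch, set_fs(KERNEL_DS) callers work again, and might_fault() is correctly skipped on that path. A sketch of such a caller (read_kernel_buf() is hypothetical, not from this patch):

    static long read_kernel_buf(void *dst, const void *src, size_t len)
    {
            mm_segment_t old_fs = get_fs();
            size_t uncopied;

            set_fs(KERNEL_DS);      /* "user" accesses now target kernel space */
            uncopied = __copy_from_user(dst, (const void __user *)src, len);
            set_fs(old_fs);

            return uncopied ? -EFAULT : 0;
    }
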
@@ -1139,7 +1160,7 @@ extern size_t __copy_in_user_eva(void *__to, const void *__from, size_t __n);
        __cu_to = (to);                                                 \
        __cu_from = (from);                                             \
        __cu_len = (n);                                                 \
-       if (segment_eq(get_fs(), get_ds())) {                           \
+       if (eva_kernel_access()) {                                      \
                __cu_len = __invoke_copy_from_kernel(__cu_to,           \
                                                     __cu_from,         \
                                                     __cu_len);         \
@@ -1163,7 +1184,7 @@ extern size_t __copy_in_user_eva(void *__to, const void *__from, size_t __n);
        __cu_to = (to);                                                 \
        __cu_from = (from);                                             \
        __cu_len = (n);                                                 \
-       if (segment_eq(get_fs(), get_ds())) {                           \
+       if (eva_kernel_access()) {                                      \
                __cu_len = ___invoke_copy_in_kernel(__cu_to, __cu_from, \
                                                    __cu_len);          \
        } else {                                                        \
@@ -1183,7 +1204,7 @@ extern size_t __copy_in_user_eva(void *__to, const void *__from, size_t __n);
        __cu_to = (to);                                                 \
        __cu_from = (from);                                             \
        __cu_len = (n);                                                 \
-       if (segment_eq(get_fs(), get_ds())) {                           \
+       if (eva_kernel_access()) {                                      \
                __cu_len = ___invoke_copy_in_kernel(__cu_to,__cu_from,  \
                                                    __cu_len);          \
        } else {                                                        \
@@ -1214,16 +1235,28 @@ __clear_user(void __user *addr, __kernel_size_t size)
 {
        __kernel_size_t res;
 
-       might_fault();
-       __asm__ __volatile__(
-               "move\t$4, %1\n\t"
-               "move\t$5, $0\n\t"
-               "move\t$6, %2\n\t"
-               __MODULE_JAL(__bzero)
-               "move\t%0, $6"
-               : "=r" (res)
-               : "r" (addr), "r" (size)
-               : "$4", "$5", "$6", __UA_t0, __UA_t1, "$31");
+       if (eva_kernel_access()) {
+               __asm__ __volatile__(
+                       "move\t$4, %1\n\t"
+                       "move\t$5, $0\n\t"
+                       "move\t$6, %2\n\t"
+                       __MODULE_JAL(__bzero_kernel)
+                       "move\t%0, $6"
+                       : "=r" (res)
+                       : "r" (addr), "r" (size)
+                       : "$4", "$5", "$6", __UA_t0, __UA_t1, "$31");
+       } else {
+               might_fault();
+               __asm__ __volatile__(
+                       "move\t$4, %1\n\t"
+                       "move\t$5, $0\n\t"
+                       "move\t$6, %2\n\t"
+                       __MODULE_JAL(__bzero)
+                       "move\t%0, $6"
+                       : "=r" (res)
+                       : "r" (addr), "r" (size)
+                       : "$4", "$5", "$6", __UA_t0, __UA_t1, "$31");
+       }
 
        return res;
 }
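
The same dispatch, open-coded here because both paths are inline asm: kernel accesses jump to __bzero_kernel and skip might_fault(), while user accesses keep the original __bzero path. Typical use is unchanged; a minimal example (ubuf and len are illustrative):

    /* __clear_user()/clear_user() return the number of bytes that
     * could NOT be zeroed, so nonzero means a fault. */
    if (clear_user(ubuf, len))
            return -EFAULT;
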
@@ -1263,7 +1296,7 @@ __strncpy_from_user(char *__to, const char __user *__from, long __len)
 {
        long res;
 
-       if (segment_eq(get_fs(), get_ds())) {
+       if (eva_kernel_access()) {
                __asm__ __volatile__(
                        "move\t$4, %1\n\t"
                        "move\t$5, %2\n\t"
@@ -1312,7 +1345,7 @@ strncpy_from_user(char *__to, const char __user *__from, long __len)
 {
        long res;
 
-       if (segment_eq(get_fs(), get_ds())) {
+       if (eva_kernel_access()) {
                __asm__ __volatile__(
                        "move\t$4, %1\n\t"
                        "move\t$5, %2\n\t"
@@ -1357,7 +1390,7 @@ static inline long strlen_user(const char __user *s)
 {
        long res;
 
-       if (segment_eq(get_fs(), get_ds())) {
+       if (eva_kernel_access()) {
                __asm__ __volatile__(
                        "move\t$4, %1\n\t"
                        __MODULE_JAL(__strlen_kernel_asm)
@@ -1369,7 +1402,7 @@ static inline long strlen_user(const char __user *s)
                might_fault();
                __asm__ __volatile__(
                        "move\t$4, %1\n\t"
-                       __MODULE_JAL(__strlen_kernel_asm)
+                       __MODULE_JAL(__strlen_user_asm)
                        "move\t%0, $2"
                        : "=r" (res)
                        : "r" (s)
@@ -1384,7 +1417,7 @@ static inline long __strnlen_user(const char __user *s, long n)
 {
        long res;
 
-       if (segment_eq(get_fs(), get_ds())) {
+       if (eva_kernel_access()) {
                __asm__ __volatile__(
                        "move\t$4, %1\n\t"
                        "move\t$5, %2\n\t"
@@ -1426,7 +1459,7 @@ static inline long strnlen_user(const char __user *s, long n)
        long res;
 
        might_fault();
-       if (segment_eq(get_fs(), get_ds())) {
+       if (eva_kernel_access()) {
                __asm__ __volatile__(
                        "move\t$4, %1\n\t"
                        "move\t$5, %2\n\t"