kernel/arch/x86/lib/csum-wrappers_64.c
/*
 * Copyright 2002, 2003 Andi Kleen, SuSE Labs.
 * Subject to the GNU Public License v.2
 *
 * Wrappers of assembly checksum functions for x86-64.
 */
#include <asm/checksum.h>
#include <linux/module.h>
#include <asm/smap.h>

/**
 * csum_partial_copy_from_user - Copy and checksum from user space.
 * @src: source address (user space)
 * @dst: destination address
 * @len: number of bytes to be copied.
 * @isum: initial sum that is added into the result (32bit unfolded)
 * @errp: set to -EFAULT for a bad source address.
 *
 * Returns a 32bit unfolded checksum of the buffer.
 * src and dst are best aligned to 64bits.
 */
__wsum
csum_partial_copy_from_user(const void __user *src, void *dst,
			    int len, __wsum isum, int *errp)
{
	might_sleep();
	*errp = 0;

	if (!likely(access_ok(VERIFY_READ, src, len)))
		goto out_err;

	/*
	 * Why 6, not 7? Handling odd (byte-aligned) addresses would
	 * require considerable complications to fix up the checksum,
	 * which is defined as a 16bit accumulator. The alignment
	 * fix-up code is primarily for performance compatibility with
	 * 32bit, which also handles odd addresses slowly.
	 */
	if (unlikely((unsigned long)src & 6)) {
		while (((unsigned long)src & 6) && len >= 2) {
			__u16 val16;

			if (__get_user(val16, (const __u16 __user *)src))
				goto out_err;

			*(__u16 *)dst = val16;
			isum = (__force __wsum)add32_with_carry(
					(__force unsigned)isum, val16);
			src += 2;
			dst += 2;
			len -= 2;
		}
	}
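	/*
	 * stac()/clac() temporarily allow user-space accesses under
	 * SMAP while the assembly copy+checksum routine reads from src.
	 */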
	stac();
	isum = csum_partial_copy_generic((__force const void *)src,
				dst, len, isum, errp, NULL);
	clac();
	if (unlikely(*errp))
		goto out_err;

	return isum;

out_err:
	*errp = -EFAULT;
	memset(dst, 0, len);

	return isum;
}
EXPORT_SYMBOL(csum_partial_copy_from_user);
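/*
 * Illustrative usage sketch (not from this file; ubuf, kbuf and len are
 * hypothetical caller variables): a caller typically copies a user
 * buffer into a kernel buffer and accumulates the checksum in one pass:
 *
 *	int err;
 *	__wsum csum = csum_partial_copy_from_user(ubuf, kbuf, len, 0, &err);
 *	if (err)
 *		return -EFAULT;	// kbuf was zeroed on the error path above
 */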

/**
 * csum_partial_copy_to_user - Copy and checksum to user space.
 * @src: source address
 * @dst: destination address (user space)
 * @len: number of bytes to be copied.
 * @isum: initial sum that is added into the result (32bit unfolded)
 * @errp: set to -EFAULT for a bad destination address.
 *
 * Returns a 32bit unfolded checksum of the buffer.
 * src and dst are best aligned to 64bits.
 */
__wsum
csum_partial_copy_to_user(const void *src, void __user *dst,
			  int len, __wsum isum, int *errp)
{
	__wsum ret;

	might_sleep();

	if (unlikely(!access_ok(VERIFY_WRITE, dst, len))) {
		*errp = -EFAULT;
		return 0;
	}

	if (unlikely((unsigned long)dst & 6)) {
		while (((unsigned long)dst & 6) && len >= 2) {
			__u16 val16 = *(__u16 *)src;

			isum = (__force __wsum)add32_with_carry(
					(__force unsigned)isum, val16);
			*errp = __put_user(val16, (__u16 __user *)dst);
			if (*errp)
				return isum;
			src += 2;
			dst += 2;
			len -= 2;
		}
	}

	*errp = 0;
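	/*
	 * As above, stac()/clac() bracket the assembly helper so it may
	 * write to user space under SMAP. Note that errp is passed as
	 * the destination-error pointer here, not the source one.
	 */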
	stac();
	ret = csum_partial_copy_generic(src, (void __force *)dst,
					len, isum, NULL, errp);
	clac();
	return ret;
}
EXPORT_SYMBOL(csum_partial_copy_to_user);

/**
 * csum_partial_copy_nocheck - Copy and checksum.
 * @src: source address
 * @dst: destination address
 * @len: number of bytes to be copied.
 * @sum: initial sum that is added into the result (32bit unfolded)
 *
 * Returns a 32bit unfolded checksum of the buffer.
 */
__wsum
csum_partial_copy_nocheck(const void *src, void *dst, int len, __wsum sum)
{
	return csum_partial_copy_generic(src, dst, len, sum, NULL, NULL);
}
EXPORT_SYMBOL(csum_partial_copy_nocheck);

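/**
 * csum_ipv6_magic - Compute a checksum of the IPv6 pseudo-header.
 * @saddr: source IPv6 address
 * @daddr: destination IPv6 address
 * @len: payload length
 * @proto: next-header protocol value
 * @sum: checksum of the payload (32bit unfolded)
 *
 * Returns the folded 16bit checksum over both addresses, the length,
 * the protocol and @sum.
 */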
__sum16 csum_ipv6_magic(const struct in6_addr *saddr,
			const struct in6_addr *daddr,
			__u32 len, unsigned short proto, __wsum sum)
{
	__u64 rest, sum64;

	rest = (__force __u64)htonl(len) + (__force __u64)htons(proto) +
		(__force __u64)sum;

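	/*
	 * Accumulate the two 128bit addresses into rest using 64bit
	 * adds, propagating the carries into sum64.
	 */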
	asm("	addq (%[saddr]),%[sum]\n"
	    "	adcq 8(%[saddr]),%[sum]\n"
	    "	adcq (%[daddr]),%[sum]\n"
	    "	adcq 8(%[daddr]),%[sum]\n"
	    "	adcq $0,%[sum]\n"

	    : [sum] "=r" (sum64)
	    : "[sum]" (rest), [saddr] "r" (saddr), [daddr] "r" (daddr));

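	/*
	 * Fold the 64bit result to 32 bits (add32_with_carry) and then
	 * to the final 16bit one's-complement checksum (csum_fold).
	 */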
	return csum_fold(
	       (__force __wsum)add32_with_carry(sum64 & 0xffffffff, sum64>>32));
}
EXPORT_SYMBOL(csum_ipv6_magic);