/*
 * INET		An implementation of the TCP/IP protocol suite for the LINUX
 *		operating system.  INET is implemented using the BSD Socket
 *		interface as the means of communication with the user level.
 *
 *		IP/TCP/UDP checksumming routines
 *
 * Authors:	Jorge Cwik, <jorge@laser.satlink.net>
 *		Arnt Gulbrandsen, <agulbra@nvg.unit.no>
 *		Tom May, <ftom@netcom.com>
 *		Pentium Pro/II routines:
 *		Alexander Kjeldaas <astor@guardian.no>
 *		Finn Arne Gangstad <finnag@guardian.no>
 *		Lots of code moved from tcp.c and ip.c; see those files
 *		for more names.
 *
 * Changes:	Ingo Molnar, converted csum_partial_copy() to 2.1 exception
 *		handling.
 *		Andi Kleen, add zeroing on error
 *		converted to pure assembler
 *		Hirokazu Takata, Hiroyuki Kondo, rewrite for the m32r architecture.
 *
 *		This program is free software; you can redistribute it and/or
 *		modify it under the terms of the GNU General Public License
 *		as published by the Free Software Foundation; either version
 *		2 of the License, or (at your option) any later version.
 */
#include <linux/linkage.h>
#include <asm/assembler.h>
#include <asm/errno.h>
/*
 * computes a partial checksum, e.g. for TCP/UDP fragments
 *
 * unsigned int csum_partial(const unsigned char * buff, int len,
 *			     unsigned int sum)
 */
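/*
 * For reference, a minimal C sketch of the semantics implemented below
 * (illustrative only, assuming a 2-byte-aligned buffer; cf. the generic
 * C version in lib/checksum.c.  The assembler versions also handle odd
 * start addresses and rotate the result when the buffer begins on one):
 *
 *	static unsigned int csum_partial_ref(const unsigned char *buff,
 *					     int len, unsigned int sum)
 *	{
 *		unsigned long long acc = sum;
 *
 *		while (len >= 2) {		// sum 16-bit words
 *			acc += *(const unsigned short *)buff;
 *			buff += 2;
 *			len -= 2;
 *		}
 *		if (len)			// trailing odd byte; its
 *			acc += *buff;		// placement is endian-dependent
 *
 *		// fold carries back in: one's-complement end-around carry
 *		acc = (acc & 0xffffffff) + (acc >> 32);
 *		acc = (acc & 0xffffffff) + (acc >> 32);
 *		return (unsigned int)acc;
 *	}
 */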
#ifdef CONFIG_ISA_DUAL_ISSUE
	/*
	 * Experiments with Ethernet and SLIP connections show that buff
	 * is aligned on either a 2-byte or 4-byte boundary.  We get at
	 * least a twofold speedup on 486 and Pentium if it is 4-byte aligned.
	 * Fortunately, it is easy to convert 2-byte alignment to 4-byte
	 * alignment for the unrolled loop.
	 */
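	/*
	 * In C terms, the alignment prologue below does roughly the
	 * following (a sketch; byte placement within the 16-bit words
	 * is endian-dependent):
	 *
	 *	if (((unsigned long)buff & 1) && len > 0) {	// odd start
	 *		sum += *buff++;
	 *		len--;
	 *	}
	 *	if (((unsigned long)buff & 2) && len >= 2) {	// halfword
	 *		sum += *(const unsigned short *)buff;
	 *		buff += 2;
	 *		len -= 2;
	 *	}
	 *	// buff is now 4-byte aligned for the unrolled word loop
	 */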
	.text
ENTRY(csum_partial)
	; Function args
	;  r0: unsigned char *buff
	;  r1: int len
	;  r2: unsigned int sum
	and3	r7, r0, #1		; Check alignment.
	beqz	r7, 1f			; Jump if alignment is ok.
	ldub	r4, @r0		||	addi	r0, #1
	; clear c-bit || Alignment uses up bytes.
	cmp	r0, r0		||	addi	r1, #-1
	ldi	r3, #0		||	addx	r2, r4
	and3	r4, r0, #2		; Check alignment.
	beqz	r4, 2f			; Jump if alignment is ok.
	; clear c-bit || Alignment uses up two bytes.
	cmp	r0, r0		||	addi	r1, #-2
	bgtz	r1, 1f			; Jump if we had at least two bytes.
	.fillinsn			; len(r1) was < 2.  Deal with it.
	lduh	r4, @r0		||	ldi	r3, #0
	addx	r2, r4		||	addi	r0, #2
	cmp	r0, r0			; clear c-bit
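	; The unrolled loop below sums 32-bit words with addx (add with
	; carry): any carry out of bit 31 is folded back in on the next
	; add, giving the end-around carry that a one's-complement sum
	; requires.  r6 holds the remaining iteration count.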
	ld	r3, @r0+	||	addx	r2, r3	; +12
	ld	r4, @r0+	||	addx	r2, r4	; +16
	ld	r5, @r0+	||	addx	r2, r5	; +20
	ld	r3, @r0+	||	addx	r2, r3	; +24
	ld	r4, @r0+	||	addx	r2, r4	; +28
	addx	r2, r5		||	addi	r6, #-1
	cmp	r0, r0			; This clears c-bit
2:	and3	r6, r1, #0x1c		; extract the remaining whole words from len
3:	ld	r4, @r0+	||	addi	r6, #-1
	cmp	r0, r0			; This clears c-bit
	beqz	r1, 7f			; if len == 0, goto end
	beqz	r6, 5f			; if len < 2, goto 5f (single byte left)
	lduh	r4, @r0		||	addi	r0, #2
	addi	r1, #-2		||	slli	r4, #16
5:	ldub	r4, @r0		||	ldi	r1, #0
#ifndef __LITTLE_ENDIAN__
	beqz	r7, 1f			; swap the upper byte for the lower
	addx	r0, r2		||	ldi	r2, #0
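	; In C terms the fold here is (sketch): add the end-around carry
	; back into the 32-bit sum.  The final reduction to 16 bits,
	;	sum = (sum >> 16) + (sum & 0xffff);  sum += sum >> 16;
	; is left to csum_fold() at the call site.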
#else /* not CONFIG_ISA_DUAL_ISSUE */
	/*
	 * Experiments with Ethernet and SLIP connections show that buff
	 * is aligned on either a 2-byte or 4-byte boundary.  We get at
	 * least a twofold speedup on 486 and Pentium if it is 4-byte aligned.
	 * Fortunately, it is easy to convert 2-byte alignment to 4-byte
	 * alignment for the unrolled loop.
	 */
	.text
ENTRY(csum_partial)
	; Function args
	;  r0: unsigned char *buff
	;  r1: int len
	;  r2: unsigned int sum
	and3	r7, r0, #1		; Check alignment.
	beqz	r7, 1f			; Jump if alignment is ok.
	addi	r1, #-1			; Alignment uses up bytes.
	cmp	r0, r0			; clear c-bit
	and3	r4, r0, #2		; Check alignment.
	beqz	r4, 2f			; Jump if alignment is ok.
	addi	r1, #-2			; Alignment uses up two bytes.
	cmp	r0, r0			; clear c-bit
	bgtz	r1, 1f			; Jump if we had at least two bytes.
	addi	r1, #2			; len(r1) was < 2.  Deal with it.
	cmp	r0, r0			; clear c-bit
	cmp	r0, r0			; This clears c-bit
2:	and3	r6, r1, #0x1c		; extract the remaining whole words from len
	cmp	r0, r0			; This clears c-bit
	beqz	r1, 7f			; if len == 0, goto end
	beqz	r6, 5f			; if len < 2, goto 5f (single byte left)
#ifndef __LITTLE_ENDIAN__
#endif /* not CONFIG_ISA_DUAL_ISSUE */
/*
 * unsigned int csum_partial_copy_generic(const char *src, char *dst,
 *				int len, int sum,
 *				int *src_err_ptr, int *dst_err_ptr)
 */
/*
 * Copy from ds while checksumming, otherwise like csum_partial
 *
 * The macros SRC and DST specify the type of access for the instruction,
 * so we can call a custom exception handler for all access types.
 *
 * FIXME: could someone double-check whether I haven't mixed up some SRC and
 *	  DST definitions?  It's damn hard to trigger all cases.  I hope I got
 *	  them all but there's no guarantee.
 */
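/*
 * For orientation, a minimal C sketch of the contract this entry point
 * implements (illustrative; csum_partial_copy_generic_ref is a made-up
 * name, and the real routine copies and sums in a single pass with
 * exception-table fixups on the faultable accesses):
 *
 *	unsigned int csum_partial_copy_generic_ref(const char *src, char *dst,
 *			int len, int sum, int *src_err_ptr, int *dst_err_ptr)
 *	{
 *		// Copy len bytes from src to dst while accumulating the
 *		// checksum.  If a source access faults, -EFAULT is stored
 *		// through *src_err_ptr (likewise *dst_err_ptr for the
 *		// destination) instead of oopsing, and the remaining
 *		// destination bytes are zeroed ("add zeroing on error"
 *		// in the changelog above).
 *		memcpy(dst, src, len);
 *		return csum_partial((const unsigned char *)dst, len, sum);
 *	}
 */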
ENTRY(csum_partial_copy_generic)