These changes are the raw update to linux-4.4.6-rt14 kernel sources.
[kvmfornfv.git] / kernel / arch / arm / crypto / sha512-core.S_shipped
1
2 @ ====================================================================
3 @ Written by Andy Polyakov <appro@openssl.org> for the OpenSSL
4 @ project. The module is, however, dual licensed under OpenSSL and
5 @ CRYPTOGAMS licenses depending on where you obtain it. For further
6 @ details see http://www.openssl.org/~appro/cryptogams/.
7 @
8 @ Permission to use under GPL terms is granted.
9 @ ====================================================================
10
11 @ SHA512 block procedure for ARMv4. September 2007.
12
13 @ This code is ~4.5 (four and a half) times faster than code generated
14 @ by gcc 3.4, and it spends ~72 clock cycles per byte [on a single-issue
15 @ Xscale PXA250 core].
16 @
17 @ July 2010.
18 @
19 @ Rescheduling for dual-issue pipeline resulted in 6% improvement on
20 @ Cortex A8 core and ~40 cycles per processed byte.
21
22 @ February 2011.
23 @
24 @ Profiler-assisted and platform-specific optimization resulted in 7%
25 @ improvement on Cortex A8 core and ~38 cycles per byte.
26
27 @ March 2011.
28 @
29 @ Add NEON implementation. On Cortex A8 it was measured to process
30 @ one byte in 23.3 cycles or ~60% faster than integer-only code.
31
32 @ August 2012.
33 @
34 @ Improve NEON performance by 12% on Snapdragon S4. In absolute
35 @ terms it's 22.6 cycles per byte, which is a disappointing result.
36 @ Technical writers asserted that the 3-way S4 pipeline can sustain
37 @ multiple NEON instructions per cycle, but dual NEON issue could
38 @ not be observed; see http://www.openssl.org/~appro/Snapdragon-S4.html
39 @ for further details. As a side note, Cortex-A15 processes one byte in
40 @ 16 cycles.
41
42 @ Byte order [in]dependence. =========================================
43 @
44 @ Originally the caller was expected to maintain a specific *dword* order in
45 @ h[0-7], namely with the most significant dword at the *lower* address, which
46 @ was reflected in the two parameters below as 0 and 4. Now the caller is
47 @ expected to maintain native byte order for the whole 64-bit values.
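@
@ A C-like illustration (sketch only; "h" stands for the 64-bit state array
@ passed in r0): with LO and HI as defined below, each state word is simply
@ read as two 32-bit halves at byte offsets LO and HI from its native
@ address,
@
@	lo = *(u32 *)((u8 *)&h[i] + LO);	/* bits 31..0  */
@	hi = *(u32 *)((u8 *)&h[i] + HI);	/* bits 63..32 */
@
@ and on big-endian the two offsets swap, so the ldr pairs below pick up the
@ correct half on either byte order.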
48 #ifndef __KERNEL__
49 # include "arm_arch.h"
50 # define VFP_ABI_PUSH   vstmdb  sp!,{d8-d15}
51 # define VFP_ABI_POP    vldmia  sp!,{d8-d15}
52 #else
53 # define __ARM_ARCH__ __LINUX_ARM_ARCH__
54 # define __ARM_MAX_ARCH__ 7
55 # define VFP_ABI_PUSH
56 # define VFP_ABI_POP
57 #endif
58
59 #ifdef __ARMEL__
60 # define LO 0
61 # define HI 4
62 # define WORD64(hi0,lo0,hi1,lo1)        .word   lo0,hi0, lo1,hi1
63 #else
64 # define HI 0
65 # define LO 4
66 # define WORD64(hi0,lo0,hi1,lo1)        .word   hi0,lo0, hi1,lo1
67 #endif
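@ For example, the first table entry below, WORD64(0x428a2f98,0xd728ae22,...),
@ emits ".word 0xd728ae22,0x428a2f98,..." on little-endian and
@ ".word 0x428a2f98,0xd728ae22,..." on big-endian, so a 64-bit load of K[0]
@ yields 0x428a2f98d728ae22 either way.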
68
69 .text
70 #if __ARM_ARCH__<7
71 .code   32
72 #else
73 .syntax unified
74 # ifdef __thumb2__
75 #  define adrl adr
76 .thumb
77 # else
78 .code   32
79 # endif
80 #endif
81
82 .type   K512,%object
83 .align  5
84 K512:
85 WORD64(0x428a2f98,0xd728ae22, 0x71374491,0x23ef65cd)
86 WORD64(0xb5c0fbcf,0xec4d3b2f, 0xe9b5dba5,0x8189dbbc)
87 WORD64(0x3956c25b,0xf348b538, 0x59f111f1,0xb605d019)
88 WORD64(0x923f82a4,0xaf194f9b, 0xab1c5ed5,0xda6d8118)
89 WORD64(0xd807aa98,0xa3030242, 0x12835b01,0x45706fbe)
90 WORD64(0x243185be,0x4ee4b28c, 0x550c7dc3,0xd5ffb4e2)
91 WORD64(0x72be5d74,0xf27b896f, 0x80deb1fe,0x3b1696b1)
92 WORD64(0x9bdc06a7,0x25c71235, 0xc19bf174,0xcf692694)
93 WORD64(0xe49b69c1,0x9ef14ad2, 0xefbe4786,0x384f25e3)
94 WORD64(0x0fc19dc6,0x8b8cd5b5, 0x240ca1cc,0x77ac9c65)
95 WORD64(0x2de92c6f,0x592b0275, 0x4a7484aa,0x6ea6e483)
96 WORD64(0x5cb0a9dc,0xbd41fbd4, 0x76f988da,0x831153b5)
97 WORD64(0x983e5152,0xee66dfab, 0xa831c66d,0x2db43210)
98 WORD64(0xb00327c8,0x98fb213f, 0xbf597fc7,0xbeef0ee4)
99 WORD64(0xc6e00bf3,0x3da88fc2, 0xd5a79147,0x930aa725)
100 WORD64(0x06ca6351,0xe003826f, 0x14292967,0x0a0e6e70)
101 WORD64(0x27b70a85,0x46d22ffc, 0x2e1b2138,0x5c26c926)
102 WORD64(0x4d2c6dfc,0x5ac42aed, 0x53380d13,0x9d95b3df)
103 WORD64(0x650a7354,0x8baf63de, 0x766a0abb,0x3c77b2a8)
104 WORD64(0x81c2c92e,0x47edaee6, 0x92722c85,0x1482353b)
105 WORD64(0xa2bfe8a1,0x4cf10364, 0xa81a664b,0xbc423001)
106 WORD64(0xc24b8b70,0xd0f89791, 0xc76c51a3,0x0654be30)
107 WORD64(0xd192e819,0xd6ef5218, 0xd6990624,0x5565a910)
108 WORD64(0xf40e3585,0x5771202a, 0x106aa070,0x32bbd1b8)
109 WORD64(0x19a4c116,0xb8d2d0c8, 0x1e376c08,0x5141ab53)
110 WORD64(0x2748774c,0xdf8eeb99, 0x34b0bcb5,0xe19b48a8)
111 WORD64(0x391c0cb3,0xc5c95a63, 0x4ed8aa4a,0xe3418acb)
112 WORD64(0x5b9cca4f,0x7763e373, 0x682e6ff3,0xd6b2b8a3)
113 WORD64(0x748f82ee,0x5defb2fc, 0x78a5636f,0x43172f60)
114 WORD64(0x84c87814,0xa1f0ab72, 0x8cc70208,0x1a6439ec)
115 WORD64(0x90befffa,0x23631e28, 0xa4506ceb,0xde82bde9)
116 WORD64(0xbef9a3f7,0xb2c67915, 0xc67178f2,0xe372532b)
117 WORD64(0xca273ece,0xea26619c, 0xd186b8c7,0x21c0c207)
118 WORD64(0xeada7dd6,0xcde0eb1e, 0xf57d4f7f,0xee6ed178)
119 WORD64(0x06f067aa,0x72176fba, 0x0a637dc5,0xa2c898a6)
120 WORD64(0x113f9804,0xbef90dae, 0x1b710b35,0x131c471b)
121 WORD64(0x28db77f5,0x23047d84, 0x32caab7b,0x40c72493)
122 WORD64(0x3c9ebe0a,0x15c9bebc, 0x431d67c4,0x9c100d4c)
123 WORD64(0x4cc5d4be,0xcb3e42b6, 0x597f299c,0xfc657e2a)
124 WORD64(0x5fcb6fab,0x3ad6faec, 0x6c44198c,0x4a475817)
125 .size   K512,.-K512
126 #if __ARM_MAX_ARCH__>=7 && !defined(__KERNEL__)
127 .LOPENSSL_armcap:
128 .word   OPENSSL_armcap_P-sha512_block_data_order
129 .skip   32-4
130 #else
131 .skip   32
132 #endif
133
134 .global sha512_block_data_order
135 .type   sha512_block_data_order,%function
136 sha512_block_data_order:
137 #if __ARM_ARCH__<7
138         sub     r3,pc,#8                @ sha512_block_data_order
139 #else
140         adr     r3,sha512_block_data_order
141 #endif
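@ Outside the kernel the NEON path is selected at run time: r3 now holds this
@ routine's own address, and adding the link-time difference stored at
@ .LOPENSSL_armcap (OPENSSL_armcap_P-sha512_block_data_order) gives a
@ position-independent pointer to the capability word; its bit 0, the NEON
@ flag, steers execution to .LNEON.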
142 #if __ARM_MAX_ARCH__>=7 && !defined(__KERNEL__)
143         ldr     r12,.LOPENSSL_armcap
144         ldr     r12,[r3,r12]            @ OPENSSL_armcap_P
145         tst     r12,#1
146         bne     .LNEON
147 #endif
148         add     r2,r1,r2,lsl#7  @ len to point at the end of inp
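        @ r2 arrives as the number of 128-byte SHA-512 blocks (hence lsl#7);
        @ turning it into an end-of-input pointer lets the outer loop close
        @ with a simple "teq r1,r2" below.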
149         stmdb   sp!,{r4-r12,lr}
150         sub     r14,r3,#672             @ K512
151         sub     sp,sp,#9*8
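        @ Scratch frame (roughly): the eight 64-bit working variables a..h
        @ live at sp+#0..#56, and the frame slides down 8 bytes per round so
        @ that earlier message words X[i] stay reachable at fixed offsets
        @ above (#64, #72, ...); "add sp,sp,#640" after all 80 rounds undoes
        @ the slide.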
152
153         ldr     r7,[r0,#32+LO]
154         ldr     r8,[r0,#32+HI]
155         ldr     r9, [r0,#48+LO]
156         ldr     r10, [r0,#48+HI]
157         ldr     r11, [r0,#56+LO]
158         ldr     r12, [r0,#56+HI]
159 .Loop:
160         str     r9, [sp,#48+0]
161         str     r10, [sp,#48+4]
162         str     r11, [sp,#56+0]
163         str     r12, [sp,#56+4]
164         ldr     r5,[r0,#0+LO]
165         ldr     r6,[r0,#0+HI]
166         ldr     r3,[r0,#8+LO]
167         ldr     r4,[r0,#8+HI]
168         ldr     r9, [r0,#16+LO]
169         ldr     r10, [r0,#16+HI]
170         ldr     r11, [r0,#24+LO]
171         ldr     r12, [r0,#24+HI]
172         str     r3,[sp,#8+0]
173         str     r4,[sp,#8+4]
174         str     r9, [sp,#16+0]
175         str     r10, [sp,#16+4]
176         str     r11, [sp,#24+0]
177         str     r12, [sp,#24+4]
178         ldr     r3,[r0,#40+LO]
179         ldr     r4,[r0,#40+HI]
180         str     r3,[sp,#40+0]
181         str     r4,[sp,#40+4]
182
183 .L00_15:
184 #if __ARM_ARCH__<7
185         ldrb    r3,[r1,#7]
186         ldrb    r9, [r1,#6]
187         ldrb    r10, [r1,#5]
188         ldrb    r11, [r1,#4]
189         ldrb    r4,[r1,#3]
190         ldrb    r12, [r1,#2]
191         orr     r3,r3,r9,lsl#8
192         ldrb    r9, [r1,#1]
193         orr     r3,r3,r10,lsl#16
194         ldrb    r10, [r1],#8
195         orr     r3,r3,r11,lsl#24
196         orr     r4,r4,r12,lsl#8
197         orr     r4,r4,r9,lsl#16
198         orr     r4,r4,r10,lsl#24
199 #else
200         ldr     r3,[r1,#4]
201         ldr     r4,[r1],#8
202 #ifdef __ARMEL__
203         rev     r3,r3
204         rev     r4,r4
205 #endif
206 #endif
207         @ Sigma1(x)     (ROTR((x),14) ^ ROTR((x),18)  ^ ROTR((x),41))
208         @ LO            lo>>14^hi<<18 ^ lo>>18^hi<<14 ^ hi>>9^lo<<23
209         @ HI            hi>>14^lo<<18 ^ hi>>18^lo<<14 ^ lo>>9^hi<<23
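        @ (General rule used throughout: with a 64-bit value split into
        @  32-bit halves {hi,lo}, ROTR64(x,n) for n<32 is
        @      lo' = (lo>>n) | (hi<<(32-n)),  hi' = (hi>>n) | (lo<<(32-n)),
        @  and for n>=32 the same formula applies with the halves swapped and
        @  n-32 as the shift count; that is how ROTR 41 above becomes the
        @  9/23-bit shift pair.)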
210         mov     r9,r7,lsr#14
211         str     r3,[sp,#64+0]
212         mov     r10,r8,lsr#14
213         str     r4,[sp,#64+4]
214         eor     r9,r9,r8,lsl#18
215         ldr     r11,[sp,#56+0]  @ h.lo
216         eor     r10,r10,r7,lsl#18
217         ldr     r12,[sp,#56+4]  @ h.hi
218         eor     r9,r9,r7,lsr#18
219         eor     r10,r10,r8,lsr#18
220         eor     r9,r9,r8,lsl#14
221         eor     r10,r10,r7,lsl#14
222         eor     r9,r9,r8,lsr#9
223         eor     r10,r10,r7,lsr#9
224         eor     r9,r9,r7,lsl#23
225         eor     r10,r10,r8,lsl#23       @ Sigma1(e)
226         adds    r3,r3,r9
227         ldr     r9,[sp,#40+0]   @ f.lo
228         adc     r4,r4,r10               @ T += Sigma1(e)
229         ldr     r10,[sp,#40+4]  @ f.hi
230         adds    r3,r3,r11
231         ldr     r11,[sp,#48+0]  @ g.lo
232         adc     r4,r4,r12               @ T += h
233         ldr     r12,[sp,#48+4]  @ g.hi
234
235         eor     r9,r9,r11
236         str     r7,[sp,#32+0]
237         eor     r10,r10,r12
238         str     r8,[sp,#32+4]
239         and     r9,r9,r7
240         str     r5,[sp,#0+0]
241         and     r10,r10,r8
242         str     r6,[sp,#0+4]
243         eor     r9,r9,r11
244         ldr     r11,[r14,#LO]   @ K[i].lo
245         eor     r10,r10,r12             @ Ch(e,f,g)
246         ldr     r12,[r14,#HI]   @ K[i].hi
247
248         adds    r3,r3,r9
249         ldr     r7,[sp,#24+0]   @ d.lo
250         adc     r4,r4,r10               @ T += Ch(e,f,g)
251         ldr     r8,[sp,#24+4]   @ d.hi
252         adds    r3,r3,r11
253         and     r9,r11,#0xff
254         adc     r4,r4,r12               @ T += K[i]
255         adds    r7,r7,r3
256         ldr     r11,[sp,#8+0]   @ b.lo
257         adc     r8,r8,r4                @ d += T
258         teq     r9,#148
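        @ K[15].lo = 0xcf692694, so a low byte of #148 identifies the last of
        @ the first 16 rounds; bit 0 of the K512 pointer (r14) is then set as
        @ a marker that .L00_15 is done.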
259
260         ldr     r12,[sp,#16+0]  @ c.lo
261 #if __ARM_ARCH__>=7
262         it      eq                      @ Thumb2 thing, sanity check in ARM
263 #endif
264         orreq   r14,r14,#1
265         @ Sigma0(x)     (ROTR((x),28) ^ ROTR((x),34) ^ ROTR((x),39))
266         @ LO            lo>>28^hi<<4  ^ hi>>2^lo<<30 ^ hi>>7^lo<<25
267         @ HI            hi>>28^lo<<4  ^ lo>>2^hi<<30 ^ lo>>7^hi<<25
268         mov     r9,r5,lsr#28
269         mov     r10,r6,lsr#28
270         eor     r9,r9,r6,lsl#4
271         eor     r10,r10,r5,lsl#4
272         eor     r9,r9,r6,lsr#2
273         eor     r10,r10,r5,lsr#2
274         eor     r9,r9,r5,lsl#30
275         eor     r10,r10,r6,lsl#30
276         eor     r9,r9,r6,lsr#7
277         eor     r10,r10,r5,lsr#7
278         eor     r9,r9,r5,lsl#25
279         eor     r10,r10,r6,lsl#25       @ Sigma0(a)
280         adds    r3,r3,r9
281         and     r9,r5,r11
282         adc     r4,r4,r10               @ T += Sigma0(a)
283
284         ldr     r10,[sp,#8+4]   @ b.hi
285         orr     r5,r5,r11
286         ldr     r11,[sp,#16+4]  @ c.hi
287         and     r5,r5,r12
288         and     r12,r6,r10
289         orr     r6,r6,r10
290         orr     r5,r5,r9                @ Maj(a,b,c).lo
291         and     r6,r6,r11
292         adds    r5,r5,r3
293         orr     r6,r6,r12               @ Maj(a,b,c).hi
294         sub     sp,sp,#8
295         adc     r6,r6,r4                @ h += T
296         tst     r14,#1
297         add     r14,r14,#8
298         tst     r14,#1
299         beq     .L00_15
300         ldr     r9,[sp,#184+0]
301         ldr     r10,[sp,#184+4]
302         bic     r14,r14,#1
303 .L16_79:
304         @ sigma0(x)     (ROTR((x),1)  ^ ROTR((x),8)  ^ ((x)>>7))
305         @ LO            lo>>1^hi<<31  ^ lo>>8^hi<<24 ^ lo>>7^hi<<25
306         @ HI            hi>>1^lo<<31  ^ hi>>8^lo<<24 ^ hi>>7
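        @ (Unlike the two rotates, the last term is a plain 64-bit shift, so
        @  its high half is just hi>>7 with nothing wrapping in from lo.)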
307         mov     r3,r9,lsr#1
308         ldr     r11,[sp,#80+0]
309         mov     r4,r10,lsr#1
310         ldr     r12,[sp,#80+4]
311         eor     r3,r3,r10,lsl#31
312         eor     r4,r4,r9,lsl#31
313         eor     r3,r3,r9,lsr#8
314         eor     r4,r4,r10,lsr#8
315         eor     r3,r3,r10,lsl#24
316         eor     r4,r4,r9,lsl#24
317         eor     r3,r3,r9,lsr#7
318         eor     r4,r4,r10,lsr#7
319         eor     r3,r3,r10,lsl#25
320
321         @ sigma1(x)     (ROTR((x),19) ^ ROTR((x),61) ^ ((x)>>6))
322         @ LO            lo>>19^hi<<13 ^ hi>>29^lo<<3 ^ lo>>6^hi<<26
323         @ HI            hi>>19^lo<<13 ^ lo>>29^hi<<3 ^ hi>>6
324         mov     r9,r11,lsr#19
325         mov     r10,r12,lsr#19
326         eor     r9,r9,r12,lsl#13
327         eor     r10,r10,r11,lsl#13
328         eor     r9,r9,r12,lsr#29
329         eor     r10,r10,r11,lsr#29
330         eor     r9,r9,r11,lsl#3
331         eor     r10,r10,r12,lsl#3
332         eor     r9,r9,r11,lsr#6
333         eor     r10,r10,r12,lsr#6
334         ldr     r11,[sp,#120+0]
335         eor     r9,r9,r12,lsl#26
336
337         ldr     r12,[sp,#120+4]
338         adds    r3,r3,r9
339         ldr     r9,[sp,#192+0]
340         adc     r4,r4,r10
341
342         ldr     r10,[sp,#192+4]
343         adds    r3,r3,r11
344         adc     r4,r4,r12
345         adds    r3,r3,r9
346         adc     r4,r4,r10
347         @ Sigma1(x)     (ROTR((x),14) ^ ROTR((x),18)  ^ ROTR((x),41))
348         @ LO            lo>>14^hi<<18 ^ lo>>18^hi<<14 ^ hi>>9^lo<<23
349         @ HI            hi>>14^lo<<18 ^ hi>>18^lo<<14 ^ lo>>9^hi<<23
350         mov     r9,r7,lsr#14
351         str     r3,[sp,#64+0]
352         mov     r10,r8,lsr#14
353         str     r4,[sp,#64+4]
354         eor     r9,r9,r8,lsl#18
355         ldr     r11,[sp,#56+0]  @ h.lo
356         eor     r10,r10,r7,lsl#18
357         ldr     r12,[sp,#56+4]  @ h.hi
358         eor     r9,r9,r7,lsr#18
359         eor     r10,r10,r8,lsr#18
360         eor     r9,r9,r8,lsl#14
361         eor     r10,r10,r7,lsl#14
362         eor     r9,r9,r8,lsr#9
363         eor     r10,r10,r7,lsr#9
364         eor     r9,r9,r7,lsl#23
365         eor     r10,r10,r8,lsl#23       @ Sigma1(e)
366         adds    r3,r3,r9
367         ldr     r9,[sp,#40+0]   @ f.lo
368         adc     r4,r4,r10               @ T += Sigma1(e)
369         ldr     r10,[sp,#40+4]  @ f.hi
370         adds    r3,r3,r11
371         ldr     r11,[sp,#48+0]  @ g.lo
372         adc     r4,r4,r12               @ T += h
373         ldr     r12,[sp,#48+4]  @ g.hi
374
375         eor     r9,r9,r11
376         str     r7,[sp,#32+0]
377         eor     r10,r10,r12
378         str     r8,[sp,#32+4]
379         and     r9,r9,r7
380         str     r5,[sp,#0+0]
381         and     r10,r10,r8
382         str     r6,[sp,#0+4]
383         eor     r9,r9,r11
384         ldr     r11,[r14,#LO]   @ K[i].lo
385         eor     r10,r10,r12             @ Ch(e,f,g)
386         ldr     r12,[r14,#HI]   @ K[i].hi
387
388         adds    r3,r3,r9
389         ldr     r7,[sp,#24+0]   @ d.lo
390         adc     r4,r4,r10               @ T += Ch(e,f,g)
391         ldr     r8,[sp,#24+4]   @ d.hi
392         adds    r3,r3,r11
393         and     r9,r11,#0xff
394         adc     r4,r4,r12               @ T += K[i]
395         adds    r7,r7,r3
396         ldr     r11,[sp,#8+0]   @ b.lo
397         adc     r8,r8,r4                @ d += T
398         teq     r9,#23
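        @ K[79].lo = 0x4a475817, so a low byte of #23 marks the final round;
        @ the flag bit in r14 again ends the loop.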
399
400         ldr     r12,[sp,#16+0]  @ c.lo
401 #if __ARM_ARCH__>=7
402         it      eq                      @ Thumb2 thing, sanity check in ARM
403 #endif
404         orreq   r14,r14,#1
405         @ Sigma0(x)     (ROTR((x),28) ^ ROTR((x),34) ^ ROTR((x),39))
406         @ LO            lo>>28^hi<<4  ^ hi>>2^lo<<30 ^ hi>>7^lo<<25
407         @ HI            hi>>28^lo<<4  ^ lo>>2^hi<<30 ^ lo>>7^hi<<25
408         mov     r9,r5,lsr#28
409         mov     r10,r6,lsr#28
410         eor     r9,r9,r6,lsl#4
411         eor     r10,r10,r5,lsl#4
412         eor     r9,r9,r6,lsr#2
413         eor     r10,r10,r5,lsr#2
414         eor     r9,r9,r5,lsl#30
415         eor     r10,r10,r6,lsl#30
416         eor     r9,r9,r6,lsr#7
417         eor     r10,r10,r5,lsr#7
418         eor     r9,r9,r5,lsl#25
419         eor     r10,r10,r6,lsl#25       @ Sigma0(a)
420         adds    r3,r3,r9
421         and     r9,r5,r11
422         adc     r4,r4,r10               @ T += Sigma0(a)
423
424         ldr     r10,[sp,#8+4]   @ b.hi
425         orr     r5,r5,r11
426         ldr     r11,[sp,#16+4]  @ c.hi
427         and     r5,r5,r12
428         and     r12,r6,r10
429         orr     r6,r6,r10
430         orr     r5,r5,r9                @ Maj(a,b,c).lo
431         and     r6,r6,r11
432         adds    r5,r5,r3
433         orr     r6,r6,r12               @ Maj(a,b,c).hi
434         sub     sp,sp,#8
435         adc     r6,r6,r4                @ h += T
436         tst     r14,#1
437         add     r14,r14,#8
438 #if __ARM_ARCH__>=7
439         ittt    eq                      @ Thumb2 thing, sanity check in ARM
440 #endif
441         ldreq   r9,[sp,#184+0]
442         ldreq   r10,[sp,#184+4]
443         beq     .L16_79
444         bic     r14,r14,#1
445
446         ldr     r3,[sp,#8+0]
447         ldr     r4,[sp,#8+4]
448         ldr     r9, [r0,#0+LO]
449         ldr     r10, [r0,#0+HI]
450         ldr     r11, [r0,#8+LO]
451         ldr     r12, [r0,#8+HI]
452         adds    r9,r5,r9
453         str     r9, [r0,#0+LO]
454         adc     r10,r6,r10
455         str     r10, [r0,#0+HI]
456         adds    r11,r3,r11
457         str     r11, [r0,#8+LO]
458         adc     r12,r4,r12
459         str     r12, [r0,#8+HI]
460
461         ldr     r5,[sp,#16+0]
462         ldr     r6,[sp,#16+4]
463         ldr     r3,[sp,#24+0]
464         ldr     r4,[sp,#24+4]
465         ldr     r9, [r0,#16+LO]
466         ldr     r10, [r0,#16+HI]
467         ldr     r11, [r0,#24+LO]
468         ldr     r12, [r0,#24+HI]
469         adds    r9,r5,r9
470         str     r9, [r0,#16+LO]
471         adc     r10,r6,r10
472         str     r10, [r0,#16+HI]
473         adds    r11,r3,r11
474         str     r11, [r0,#24+LO]
475         adc     r12,r4,r12
476         str     r12, [r0,#24+HI]
477
478         ldr     r3,[sp,#40+0]
479         ldr     r4,[sp,#40+4]
480         ldr     r9, [r0,#32+LO]
481         ldr     r10, [r0,#32+HI]
482         ldr     r11, [r0,#40+LO]
483         ldr     r12, [r0,#40+HI]
484         adds    r7,r7,r9
485         str     r7,[r0,#32+LO]
486         adc     r8,r8,r10
487         str     r8,[r0,#32+HI]
488         adds    r11,r3,r11
489         str     r11, [r0,#40+LO]
490         adc     r12,r4,r12
491         str     r12, [r0,#40+HI]
492
493         ldr     r5,[sp,#48+0]
494         ldr     r6,[sp,#48+4]
495         ldr     r3,[sp,#56+0]
496         ldr     r4,[sp,#56+4]
497         ldr     r9, [r0,#48+LO]
498         ldr     r10, [r0,#48+HI]
499         ldr     r11, [r0,#56+LO]
500         ldr     r12, [r0,#56+HI]
501         adds    r9,r5,r9
502         str     r9, [r0,#48+LO]
503         adc     r10,r6,r10
504         str     r10, [r0,#48+HI]
505         adds    r11,r3,r11
506         str     r11, [r0,#56+LO]
507         adc     r12,r4,r12
508         str     r12, [r0,#56+HI]
509
510         add     sp,sp,#640
511         sub     r14,r14,#640
512
513         teq     r1,r2
514         bne     .Loop
515
516         add     sp,sp,#8*9              @ destroy frame
517 #if __ARM_ARCH__>=5
518         ldmia   sp!,{r4-r12,pc}
519 #else
520         ldmia   sp!,{r4-r12,lr}
521         tst     lr,#1
522         moveq   pc,lr                   @ be binary compatible with V4, yet
523         .word   0xe12fff1e                      @ interoperable with Thumb ISA:-)
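        @ (0xe12fff1e encodes "bx lr": ARM-state callers, with bit 0 of lr
        @  clear, already returned via the moveq above, so only Thumb callers
        @  reach this word; the routine thus still assembles and runs on
        @  plain ARMv4 while interworking on v4T and later.)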
524 #endif
525 .size   sha512_block_data_order,.-sha512_block_data_order
526 #if __ARM_MAX_ARCH__>=7
527 .arch   armv7-a
528 .fpu    neon
529
530 .global sha512_block_data_order_neon
531 .type   sha512_block_data_order_neon,%function
532 .align  4
533 sha512_block_data_order_neon:
534 .LNEON:
535         dmb                             @ errata #451034 on early Cortex A8
536         add     r2,r1,r2,lsl#7  @ len to point at the end of inp
537         VFP_ABI_PUSH
538         adrl    r3,K512
539         vldmia  r0,{d16-d23}            @ load context
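        @ NEON register plan used below: d16-d23 hold the working variables
        @ a..h, q0-q7 (d0-d15) hold the 16 message dwords, d28 is K[i],
        @ d29/d30 collect Ch/Maj, and d24-d27 serve as rotate and sum
        @ temporaries.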
540 .Loop_neon:
541         vshr.u64        d24,d20,#14     @ 0
542 #if 0<16
543         vld1.64         {d0},[r1]!      @ handles unaligned
544 #endif
545         vshr.u64        d25,d20,#18
546 #if 0>0
547          vadd.i64       d16,d30                 @ h+=Maj from the past
548 #endif
549         vshr.u64        d26,d20,#41
550         vld1.64         {d28},[r3,:64]! @ K[i++]
551         vsli.64         d24,d20,#50
552         vsli.64         d25,d20,#46
553         vmov            d29,d20
554         vsli.64         d26,d20,#23
555 #if 0<16 && defined(__ARMEL__)
556         vrev64.8        d0,d0
557 #endif
558         veor            d25,d24
559         vbsl            d29,d21,d22             @ Ch(e,f,g)
560         vshr.u64        d24,d16,#28
561         veor            d26,d25                 @ Sigma1(e)
562         vadd.i64        d27,d29,d23
563         vshr.u64        d25,d16,#34
564         vsli.64         d24,d16,#36
565         vadd.i64        d27,d26
566         vshr.u64        d26,d16,#39
567         vadd.i64        d28,d0
568         vsli.64         d25,d16,#30
569         veor            d30,d16,d17
570         vsli.64         d26,d16,#25
571         veor            d23,d24,d25
572         vadd.i64        d27,d28
573         vbsl            d30,d18,d17             @ Maj(a,b,c)
574         veor            d23,d26                 @ Sigma0(a)
575         vadd.i64        d19,d27
576         vadd.i64        d30,d27
577         @ vadd.i64      d23,d30
578         vshr.u64        d24,d19,#14     @ 1
579 #if 1<16
580         vld1.64         {d1},[r1]!      @ handles unaligned
581 #endif
582         vshr.u64        d25,d19,#18
583 #if 1>0
584          vadd.i64       d23,d30                 @ h+=Maj from the past
585 #endif
586         vshr.u64        d26,d19,#41
587         vld1.64         {d28},[r3,:64]! @ K[i++]
588         vsli.64         d24,d19,#50
589         vsli.64         d25,d19,#46
590         vmov            d29,d19
591         vsli.64         d26,d19,#23
592 #if 1<16 && defined(__ARMEL__)
593         vrev64.8        d1,d1
594 #endif
595         veor            d25,d24
596         vbsl            d29,d20,d21             @ Ch(e,f,g)
597         vshr.u64        d24,d23,#28
598         veor            d26,d25                 @ Sigma1(e)
599         vadd.i64        d27,d29,d22
600         vshr.u64        d25,d23,#34
601         vsli.64         d24,d23,#36
602         vadd.i64        d27,d26
603         vshr.u64        d26,d23,#39
604         vadd.i64        d28,d1
605         vsli.64         d25,d23,#30
606         veor            d30,d23,d16
607         vsli.64         d26,d23,#25
608         veor            d22,d24,d25
609         vadd.i64        d27,d28
610         vbsl            d30,d17,d16             @ Maj(a,b,c)
611         veor            d22,d26                 @ Sigma0(a)
612         vadd.i64        d18,d27
613         vadd.i64        d30,d27
614         @ vadd.i64      d22,d30
615         vshr.u64        d24,d18,#14     @ 2
616 #if 2<16
617         vld1.64         {d2},[r1]!      @ handles unaligned
618 #endif
619         vshr.u64        d25,d18,#18
620 #if 2>0
621          vadd.i64       d22,d30                 @ h+=Maj from the past
622 #endif
623         vshr.u64        d26,d18,#41
624         vld1.64         {d28},[r3,:64]! @ K[i++]
625         vsli.64         d24,d18,#50
626         vsli.64         d25,d18,#46
627         vmov            d29,d18
628         vsli.64         d26,d18,#23
629 #if 2<16 && defined(__ARMEL__)
630         vrev64.8        d2,d2
631 #endif
632         veor            d25,d24
633         vbsl            d29,d19,d20             @ Ch(e,f,g)
634         vshr.u64        d24,d22,#28
635         veor            d26,d25                 @ Sigma1(e)
636         vadd.i64        d27,d29,d21
637         vshr.u64        d25,d22,#34
638         vsli.64         d24,d22,#36
639         vadd.i64        d27,d26
640         vshr.u64        d26,d22,#39
641         vadd.i64        d28,d2
642         vsli.64         d25,d22,#30
643         veor            d30,d22,d23
644         vsli.64         d26,d22,#25
645         veor            d21,d24,d25
646         vadd.i64        d27,d28
647         vbsl            d30,d16,d23             @ Maj(a,b,c)
648         veor            d21,d26                 @ Sigma0(a)
649         vadd.i64        d17,d27
650         vadd.i64        d30,d27
651         @ vadd.i64      d21,d30
652         vshr.u64        d24,d17,#14     @ 3
653 #if 3<16
654         vld1.64         {d3},[r1]!      @ handles unaligned
655 #endif
656         vshr.u64        d25,d17,#18
657 #if 3>0
658          vadd.i64       d21,d30                 @ h+=Maj from the past
659 #endif
660         vshr.u64        d26,d17,#41
661         vld1.64         {d28},[r3,:64]! @ K[i++]
662         vsli.64         d24,d17,#50
663         vsli.64         d25,d17,#46
664         vmov            d29,d17
665         vsli.64         d26,d17,#23
666 #if 3<16 && defined(__ARMEL__)
667         vrev64.8        d3,d3
668 #endif
669         veor            d25,d24
670         vbsl            d29,d18,d19             @ Ch(e,f,g)
671         vshr.u64        d24,d21,#28
672         veor            d26,d25                 @ Sigma1(e)
673         vadd.i64        d27,d29,d20
674         vshr.u64        d25,d21,#34
675         vsli.64         d24,d21,#36
676         vadd.i64        d27,d26
677         vshr.u64        d26,d21,#39
678         vadd.i64        d28,d3
679         vsli.64         d25,d21,#30
680         veor            d30,d21,d22
681         vsli.64         d26,d21,#25
682         veor            d20,d24,d25
683         vadd.i64        d27,d28
684         vbsl            d30,d23,d22             @ Maj(a,b,c)
685         veor            d20,d26                 @ Sigma0(a)
686         vadd.i64        d16,d27
687         vadd.i64        d30,d27
688         @ vadd.i64      d20,d30
689         vshr.u64        d24,d16,#14     @ 4
690 #if 4<16
691         vld1.64         {d4},[r1]!      @ handles unaligned
692 #endif
693         vshr.u64        d25,d16,#18
694 #if 4>0
695          vadd.i64       d20,d30                 @ h+=Maj from the past
696 #endif
697         vshr.u64        d26,d16,#41
698         vld1.64         {d28},[r3,:64]! @ K[i++]
699         vsli.64         d24,d16,#50
700         vsli.64         d25,d16,#46
701         vmov            d29,d16
702         vsli.64         d26,d16,#23
703 #if 4<16 && defined(__ARMEL__)
704         vrev64.8        d4,d4
705 #endif
706         veor            d25,d24
707         vbsl            d29,d17,d18             @ Ch(e,f,g)
708         vshr.u64        d24,d20,#28
709         veor            d26,d25                 @ Sigma1(e)
710         vadd.i64        d27,d29,d19
711         vshr.u64        d25,d20,#34
712         vsli.64         d24,d20,#36
713         vadd.i64        d27,d26
714         vshr.u64        d26,d20,#39
715         vadd.i64        d28,d4
716         vsli.64         d25,d20,#30
717         veor            d30,d20,d21
718         vsli.64         d26,d20,#25
719         veor            d19,d24,d25
720         vadd.i64        d27,d28
721         vbsl            d30,d22,d21             @ Maj(a,b,c)
722         veor            d19,d26                 @ Sigma0(a)
723         vadd.i64        d23,d27
724         vadd.i64        d30,d27
725         @ vadd.i64      d19,d30
726         vshr.u64        d24,d23,#14     @ 5
727 #if 5<16
728         vld1.64         {d5},[r1]!      @ handles unaligned
729 #endif
730         vshr.u64        d25,d23,#18
731 #if 5>0
732          vadd.i64       d19,d30                 @ h+=Maj from the past
733 #endif
734         vshr.u64        d26,d23,#41
735         vld1.64         {d28},[r3,:64]! @ K[i++]
736         vsli.64         d24,d23,#50
737         vsli.64         d25,d23,#46
738         vmov            d29,d23
739         vsli.64         d26,d23,#23
740 #if 5<16 && defined(__ARMEL__)
741         vrev64.8        d5,d5
742 #endif
743         veor            d25,d24
744         vbsl            d29,d16,d17             @ Ch(e,f,g)
745         vshr.u64        d24,d19,#28
746         veor            d26,d25                 @ Sigma1(e)
747         vadd.i64        d27,d29,d18
748         vshr.u64        d25,d19,#34
749         vsli.64         d24,d19,#36
750         vadd.i64        d27,d26
751         vshr.u64        d26,d19,#39
752         vadd.i64        d28,d5
753         vsli.64         d25,d19,#30
754         veor            d30,d19,d20
755         vsli.64         d26,d19,#25
756         veor            d18,d24,d25
757         vadd.i64        d27,d28
758         vbsl            d30,d21,d20             @ Maj(a,b,c)
759         veor            d18,d26                 @ Sigma0(a)
760         vadd.i64        d22,d27
761         vadd.i64        d30,d27
762         @ vadd.i64      d18,d30
763         vshr.u64        d24,d22,#14     @ 6
764 #if 6<16
765         vld1.64         {d6},[r1]!      @ handles unaligned
766 #endif
767         vshr.u64        d25,d22,#18
768 #if 6>0
769          vadd.i64       d18,d30                 @ h+=Maj from the past
770 #endif
771         vshr.u64        d26,d22,#41
772         vld1.64         {d28},[r3,:64]! @ K[i++]
773         vsli.64         d24,d22,#50
774         vsli.64         d25,d22,#46
775         vmov            d29,d22
776         vsli.64         d26,d22,#23
777 #if 6<16 && defined(__ARMEL__)
778         vrev64.8        d6,d6
779 #endif
780         veor            d25,d24
781         vbsl            d29,d23,d16             @ Ch(e,f,g)
782         vshr.u64        d24,d18,#28
783         veor            d26,d25                 @ Sigma1(e)
784         vadd.i64        d27,d29,d17
785         vshr.u64        d25,d18,#34
786         vsli.64         d24,d18,#36
787         vadd.i64        d27,d26
788         vshr.u64        d26,d18,#39
789         vadd.i64        d28,d6
790         vsli.64         d25,d18,#30
791         veor            d30,d18,d19
792         vsli.64         d26,d18,#25
793         veor            d17,d24,d25
794         vadd.i64        d27,d28
795         vbsl            d30,d20,d19             @ Maj(a,b,c)
796         veor            d17,d26                 @ Sigma0(a)
797         vadd.i64        d21,d27
798         vadd.i64        d30,d27
799         @ vadd.i64      d17,d30
800         vshr.u64        d24,d21,#14     @ 7
801 #if 7<16
802         vld1.64         {d7},[r1]!      @ handles unaligned
803 #endif
804         vshr.u64        d25,d21,#18
805 #if 7>0
806          vadd.i64       d17,d30                 @ h+=Maj from the past
807 #endif
808         vshr.u64        d26,d21,#41
809         vld1.64         {d28},[r3,:64]! @ K[i++]
810         vsli.64         d24,d21,#50
811         vsli.64         d25,d21,#46
812         vmov            d29,d21
813         vsli.64         d26,d21,#23
814 #if 7<16 && defined(__ARMEL__)
815         vrev64.8        d7,d7
816 #endif
817         veor            d25,d24
818         vbsl            d29,d22,d23             @ Ch(e,f,g)
819         vshr.u64        d24,d17,#28
820         veor            d26,d25                 @ Sigma1(e)
821         vadd.i64        d27,d29,d16
822         vshr.u64        d25,d17,#34
823         vsli.64         d24,d17,#36
824         vadd.i64        d27,d26
825         vshr.u64        d26,d17,#39
826         vadd.i64        d28,d7
827         vsli.64         d25,d17,#30
828         veor            d30,d17,d18
829         vsli.64         d26,d17,#25
830         veor            d16,d24,d25
831         vadd.i64        d27,d28
832         vbsl            d30,d19,d18             @ Maj(a,b,c)
833         veor            d16,d26                 @ Sigma0(a)
834         vadd.i64        d20,d27
835         vadd.i64        d30,d27
836         @ vadd.i64      d16,d30
837         vshr.u64        d24,d20,#14     @ 8
838 #if 8<16
839         vld1.64         {d8},[r1]!      @ handles unaligned
840 #endif
841         vshr.u64        d25,d20,#18
842 #if 8>0
843          vadd.i64       d16,d30                 @ h+=Maj from the past
844 #endif
845         vshr.u64        d26,d20,#41
846         vld1.64         {d28},[r3,:64]! @ K[i++]
847         vsli.64         d24,d20,#50
848         vsli.64         d25,d20,#46
849         vmov            d29,d20
850         vsli.64         d26,d20,#23
851 #if 8<16 && defined(__ARMEL__)
852         vrev64.8        d8,d8
853 #endif
854         veor            d25,d24
855         vbsl            d29,d21,d22             @ Ch(e,f,g)
856         vshr.u64        d24,d16,#28
857         veor            d26,d25                 @ Sigma1(e)
858         vadd.i64        d27,d29,d23
859         vshr.u64        d25,d16,#34
860         vsli.64         d24,d16,#36
861         vadd.i64        d27,d26
862         vshr.u64        d26,d16,#39
863         vadd.i64        d28,d8
864         vsli.64         d25,d16,#30
865         veor            d30,d16,d17
866         vsli.64         d26,d16,#25
867         veor            d23,d24,d25
868         vadd.i64        d27,d28
869         vbsl            d30,d18,d17             @ Maj(a,b,c)
870         veor            d23,d26                 @ Sigma0(a)
871         vadd.i64        d19,d27
872         vadd.i64        d30,d27
873         @ vadd.i64      d23,d30
874         vshr.u64        d24,d19,#14     @ 9
875 #if 9<16
876         vld1.64         {d9},[r1]!      @ handles unaligned
877 #endif
878         vshr.u64        d25,d19,#18
879 #if 9>0
880          vadd.i64       d23,d30                 @ h+=Maj from the past
881 #endif
882         vshr.u64        d26,d19,#41
883         vld1.64         {d28},[r3,:64]! @ K[i++]
884         vsli.64         d24,d19,#50
885         vsli.64         d25,d19,#46
886         vmov            d29,d19
887         vsli.64         d26,d19,#23
888 #if 9<16 && defined(__ARMEL__)
889         vrev64.8        d9,d9
890 #endif
891         veor            d25,d24
892         vbsl            d29,d20,d21             @ Ch(e,f,g)
893         vshr.u64        d24,d23,#28
894         veor            d26,d25                 @ Sigma1(e)
895         vadd.i64        d27,d29,d22
896         vshr.u64        d25,d23,#34
897         vsli.64         d24,d23,#36
898         vadd.i64        d27,d26
899         vshr.u64        d26,d23,#39
900         vadd.i64        d28,d9
901         vsli.64         d25,d23,#30
902         veor            d30,d23,d16
903         vsli.64         d26,d23,#25
904         veor            d22,d24,d25
905         vadd.i64        d27,d28
906         vbsl            d30,d17,d16             @ Maj(a,b,c)
907         veor            d22,d26                 @ Sigma0(a)
908         vadd.i64        d18,d27
909         vadd.i64        d30,d27
910         @ vadd.i64      d22,d30
911         vshr.u64        d24,d18,#14     @ 10
912 #if 10<16
913         vld1.64         {d10},[r1]!     @ handles unaligned
914 #endif
915         vshr.u64        d25,d18,#18
916 #if 10>0
917          vadd.i64       d22,d30                 @ h+=Maj from the past
918 #endif
919         vshr.u64        d26,d18,#41
920         vld1.64         {d28},[r3,:64]! @ K[i++]
921         vsli.64         d24,d18,#50
922         vsli.64         d25,d18,#46
923         vmov            d29,d18
924         vsli.64         d26,d18,#23
925 #if 10<16 && defined(__ARMEL__)
926         vrev64.8        d10,d10
927 #endif
928         veor            d25,d24
929         vbsl            d29,d19,d20             @ Ch(e,f,g)
930         vshr.u64        d24,d22,#28
931         veor            d26,d25                 @ Sigma1(e)
932         vadd.i64        d27,d29,d21
933         vshr.u64        d25,d22,#34
934         vsli.64         d24,d22,#36
935         vadd.i64        d27,d26
936         vshr.u64        d26,d22,#39
937         vadd.i64        d28,d10
938         vsli.64         d25,d22,#30
939         veor            d30,d22,d23
940         vsli.64         d26,d22,#25
941         veor            d21,d24,d25
942         vadd.i64        d27,d28
943         vbsl            d30,d16,d23             @ Maj(a,b,c)
944         veor            d21,d26                 @ Sigma0(a)
945         vadd.i64        d17,d27
946         vadd.i64        d30,d27
947         @ vadd.i64      d21,d30
948         vshr.u64        d24,d17,#14     @ 11
949 #if 11<16
950         vld1.64         {d11},[r1]!     @ handles unaligned
951 #endif
952         vshr.u64        d25,d17,#18
953 #if 11>0
954          vadd.i64       d21,d30                 @ h+=Maj from the past
955 #endif
956         vshr.u64        d26,d17,#41
957         vld1.64         {d28},[r3,:64]! @ K[i++]
958         vsli.64         d24,d17,#50
959         vsli.64         d25,d17,#46
960         vmov            d29,d17
961         vsli.64         d26,d17,#23
962 #if 11<16 && defined(__ARMEL__)
963         vrev64.8        d11,d11
964 #endif
965         veor            d25,d24
966         vbsl            d29,d18,d19             @ Ch(e,f,g)
967         vshr.u64        d24,d21,#28
968         veor            d26,d25                 @ Sigma1(e)
969         vadd.i64        d27,d29,d20
970         vshr.u64        d25,d21,#34
971         vsli.64         d24,d21,#36
972         vadd.i64        d27,d26
973         vshr.u64        d26,d21,#39
974         vadd.i64        d28,d11
975         vsli.64         d25,d21,#30
976         veor            d30,d21,d22
977         vsli.64         d26,d21,#25
978         veor            d20,d24,d25
979         vadd.i64        d27,d28
980         vbsl            d30,d23,d22             @ Maj(a,b,c)
981         veor            d20,d26                 @ Sigma0(a)
982         vadd.i64        d16,d27
983         vadd.i64        d30,d27
984         @ vadd.i64      d20,d30
985         vshr.u64        d24,d16,#14     @ 12
986 #if 12<16
987         vld1.64         {d12},[r1]!     @ handles unaligned
988 #endif
989         vshr.u64        d25,d16,#18
990 #if 12>0
991          vadd.i64       d20,d30                 @ h+=Maj from the past
992 #endif
993         vshr.u64        d26,d16,#41
994         vld1.64         {d28},[r3,:64]! @ K[i++]
995         vsli.64         d24,d16,#50
996         vsli.64         d25,d16,#46
997         vmov            d29,d16
998         vsli.64         d26,d16,#23
999 #if 12<16 && defined(__ARMEL__)
1000         vrev64.8        d12,d12
1001 #endif
1002         veor            d25,d24
1003         vbsl            d29,d17,d18             @ Ch(e,f,g)
1004         vshr.u64        d24,d20,#28
1005         veor            d26,d25                 @ Sigma1(e)
1006         vadd.i64        d27,d29,d19
1007         vshr.u64        d25,d20,#34
1008         vsli.64         d24,d20,#36
1009         vadd.i64        d27,d26
1010         vshr.u64        d26,d20,#39
1011         vadd.i64        d28,d12
1012         vsli.64         d25,d20,#30
1013         veor            d30,d20,d21
1014         vsli.64         d26,d20,#25
1015         veor            d19,d24,d25
1016         vadd.i64        d27,d28
1017         vbsl            d30,d22,d21             @ Maj(a,b,c)
1018         veor            d19,d26                 @ Sigma0(a)
1019         vadd.i64        d23,d27
1020         vadd.i64        d30,d27
1021         @ vadd.i64      d19,d30
1022         vshr.u64        d24,d23,#14     @ 13
1023 #if 13<16
1024         vld1.64         {d13},[r1]!     @ handles unaligned
1025 #endif
1026         vshr.u64        d25,d23,#18
1027 #if 13>0
1028          vadd.i64       d19,d30                 @ h+=Maj from the past
1029 #endif
1030         vshr.u64        d26,d23,#41
1031         vld1.64         {d28},[r3,:64]! @ K[i++]
1032         vsli.64         d24,d23,#50
1033         vsli.64         d25,d23,#46
1034         vmov            d29,d23
1035         vsli.64         d26,d23,#23
1036 #if 13<16 && defined(__ARMEL__)
1037         vrev64.8        d13,d13
1038 #endif
1039         veor            d25,d24
1040         vbsl            d29,d16,d17             @ Ch(e,f,g)
1041         vshr.u64        d24,d19,#28
1042         veor            d26,d25                 @ Sigma1(e)
1043         vadd.i64        d27,d29,d18
1044         vshr.u64        d25,d19,#34
1045         vsli.64         d24,d19,#36
1046         vadd.i64        d27,d26
1047         vshr.u64        d26,d19,#39
1048         vadd.i64        d28,d13
1049         vsli.64         d25,d19,#30
1050         veor            d30,d19,d20
1051         vsli.64         d26,d19,#25
1052         veor            d18,d24,d25
1053         vadd.i64        d27,d28
1054         vbsl            d30,d21,d20             @ Maj(a,b,c)
1055         veor            d18,d26                 @ Sigma0(a)
1056         vadd.i64        d22,d27
1057         vadd.i64        d30,d27
1058         @ vadd.i64      d18,d30
1059         vshr.u64        d24,d22,#14     @ 14
1060 #if 14<16
1061         vld1.64         {d14},[r1]!     @ handles unaligned
1062 #endif
1063         vshr.u64        d25,d22,#18
1064 #if 14>0
1065          vadd.i64       d18,d30                 @ h+=Maj from the past
1066 #endif
1067         vshr.u64        d26,d22,#41
1068         vld1.64         {d28},[r3,:64]! @ K[i++]
1069         vsli.64         d24,d22,#50
1070         vsli.64         d25,d22,#46
1071         vmov            d29,d22
1072         vsli.64         d26,d22,#23
1073 #if 14<16 && defined(__ARMEL__)
1074         vrev64.8        d14,d14
1075 #endif
1076         veor            d25,d24
1077         vbsl            d29,d23,d16             @ Ch(e,f,g)
1078         vshr.u64        d24,d18,#28
1079         veor            d26,d25                 @ Sigma1(e)
1080         vadd.i64        d27,d29,d17
1081         vshr.u64        d25,d18,#34
1082         vsli.64         d24,d18,#36
1083         vadd.i64        d27,d26
1084         vshr.u64        d26,d18,#39
1085         vadd.i64        d28,d14
1086         vsli.64         d25,d18,#30
1087         veor            d30,d18,d19
1088         vsli.64         d26,d18,#25
1089         veor            d17,d24,d25
1090         vadd.i64        d27,d28
1091         vbsl            d30,d20,d19             @ Maj(a,b,c)
1092         veor            d17,d26                 @ Sigma0(a)
1093         vadd.i64        d21,d27
1094         vadd.i64        d30,d27
1095         @ vadd.i64      d17,d30
1096         vshr.u64        d24,d21,#14     @ 15
1097 #if 15<16
1098         vld1.64         {d15},[r1]!     @ handles unaligned
1099 #endif
1100         vshr.u64        d25,d21,#18
1101 #if 15>0
1102          vadd.i64       d17,d30                 @ h+=Maj from the past
1103 #endif
1104         vshr.u64        d26,d21,#41
1105         vld1.64         {d28},[r3,:64]! @ K[i++]
1106         vsli.64         d24,d21,#50
1107         vsli.64         d25,d21,#46
1108         vmov            d29,d21
1109         vsli.64         d26,d21,#23
1110 #if 15<16 && defined(__ARMEL__)
1111         vrev64.8        d15,d15
1112 #endif
1113         veor            d25,d24
1114         vbsl            d29,d22,d23             @ Ch(e,f,g)
1115         vshr.u64        d24,d17,#28
1116         veor            d26,d25                 @ Sigma1(e)
1117         vadd.i64        d27,d29,d16
1118         vshr.u64        d25,d17,#34
1119         vsli.64         d24,d17,#36
1120         vadd.i64        d27,d26
1121         vshr.u64        d26,d17,#39
1122         vadd.i64        d28,d15
1123         vsli.64         d25,d17,#30
1124         veor            d30,d17,d18
1125         vsli.64         d26,d17,#25
1126         veor            d16,d24,d25
1127         vadd.i64        d27,d28
1128         vbsl            d30,d19,d18             @ Maj(a,b,c)
1129         veor            d16,d26                 @ Sigma0(a)
1130         vadd.i64        d20,d27
1131         vadd.i64        d30,d27
1132         @ vadd.i64      d16,d30
1133         mov             r12,#4
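        @ The remaining 64 rounds run as four passes of the 16-round
        @ .L16_79_neon loop; r12 counts the passes down from 4.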
1134 .L16_79_neon:
1135         subs            r12,#1
1136         vshr.u64        q12,q7,#19
1137         vshr.u64        q13,q7,#61
1138          vadd.i64       d16,d30                 @ h+=Maj from the past
1139         vshr.u64        q15,q7,#6
1140         vsli.64         q12,q7,#45
1141         vext.8          q14,q0,q1,#8    @ X[i+1]
1142         vsli.64         q13,q7,#3
1143         veor            q15,q12
1144         vshr.u64        q12,q14,#1
1145         veor            q15,q13                         @ sigma1(X[i+14])
1146         vshr.u64        q13,q14,#8
1147         vadd.i64        q0,q15
1148         vshr.u64        q15,q14,#7
1149         vsli.64         q12,q14,#63
1150         vsli.64         q13,q14,#56
1151         vext.8          q14,q4,q5,#8    @ X[i+9]
1152         veor            q15,q12
1153         vshr.u64        d24,d20,#14             @ from NEON_00_15
1154         vadd.i64        q0,q14
1155         vshr.u64        d25,d20,#18             @ from NEON_00_15
1156         veor            q15,q13                         @ sigma0(X[i+1])
1157         vshr.u64        d26,d20,#41             @ from NEON_00_15
1158         vadd.i64        q0,q15
1159         vld1.64         {d28},[r3,:64]! @ K[i++]
1160         vsli.64         d24,d20,#50
1161         vsli.64         d25,d20,#46
1162         vmov            d29,d20
1163         vsli.64         d26,d20,#23
1164 #if 16<16 && defined(__ARMEL__)
1165         vrev64.8        ,
1166 #endif
1167         veor            d25,d24
1168         vbsl            d29,d21,d22             @ Ch(e,f,g)
1169         vshr.u64        d24,d16,#28
1170         veor            d26,d25                 @ Sigma1(e)
1171         vadd.i64        d27,d29,d23
1172         vshr.u64        d25,d16,#34
1173         vsli.64         d24,d16,#36
1174         vadd.i64        d27,d26
1175         vshr.u64        d26,d16,#39
1176         vadd.i64        d28,d0
1177         vsli.64         d25,d16,#30
1178         veor            d30,d16,d17
1179         vsli.64         d26,d16,#25
1180         veor            d23,d24,d25
1181         vadd.i64        d27,d28
1182         vbsl            d30,d18,d17             @ Maj(a,b,c)
1183         veor            d23,d26                 @ Sigma0(a)
1184         vadd.i64        d19,d27
1185         vadd.i64        d30,d27
1186         @ vadd.i64      d23,d30
1187         vshr.u64        d24,d19,#14     @ 17
1188 #if 17<16
1189         vld1.64         {d1},[r1]!      @ handles unaligned
1190 #endif
1191         vshr.u64        d25,d19,#18
1192 #if 17>0
1193          vadd.i64       d23,d30                 @ h+=Maj from the past
1194 #endif
1195         vshr.u64        d26,d19,#41
1196         vld1.64         {d28},[r3,:64]! @ K[i++]
1197         vsli.64         d24,d19,#50
1198         vsli.64         d25,d19,#46
1199         vmov            d29,d19
1200         vsli.64         d26,d19,#23
1201 #if 17<16 && defined(__ARMEL__)
1202         vrev64.8        ,
1203 #endif
1204         veor            d25,d24
1205         vbsl            d29,d20,d21             @ Ch(e,f,g)
1206         vshr.u64        d24,d23,#28
1207         veor            d26,d25                 @ Sigma1(e)
1208         vadd.i64        d27,d29,d22
1209         vshr.u64        d25,d23,#34
1210         vsli.64         d24,d23,#36
1211         vadd.i64        d27,d26
1212         vshr.u64        d26,d23,#39
1213         vadd.i64        d28,d1
1214         vsli.64         d25,d23,#30
1215         veor            d30,d23,d16
1216         vsli.64         d26,d23,#25
1217         veor            d22,d24,d25
1218         vadd.i64        d27,d28
1219         vbsl            d30,d17,d16             @ Maj(a,b,c)
1220         veor            d22,d26                 @ Sigma0(a)
1221         vadd.i64        d18,d27
1222         vadd.i64        d30,d27
1223         @ vadd.i64      d22,d30
1224         vshr.u64        q12,q0,#19
1225         vshr.u64        q13,q0,#61
1226          vadd.i64       d22,d30                 @ h+=Maj from the past
1227         vshr.u64        q15,q0,#6
1228         vsli.64         q12,q0,#45
1229         vext.8          q14,q1,q2,#8    @ X[i+1]
1230         vsli.64         q13,q0,#3
1231         veor            q15,q12
1232         vshr.u64        q12,q14,#1
1233         veor            q15,q13                         @ sigma1(X[i+14])
1234         vshr.u64        q13,q14,#8
1235         vadd.i64        q1,q15
1236         vshr.u64        q15,q14,#7
1237         vsli.64         q12,q14,#63
1238         vsli.64         q13,q14,#56
1239         vext.8          q14,q5,q6,#8    @ X[i+9]
1240         veor            q15,q12
1241         vshr.u64        d24,d18,#14             @ from NEON_00_15
1242         vadd.i64        q1,q14
1243         vshr.u64        d25,d18,#18             @ from NEON_00_15
1244         veor            q15,q13                         @ sigma0(X[i+1])
1245         vshr.u64        d26,d18,#41             @ from NEON_00_15
1246         vadd.i64        q1,q15
1247         vld1.64         {d28},[r3,:64]! @ K[i++]
1248         vsli.64         d24,d18,#50
1249         vsli.64         d25,d18,#46
1250         vmov            d29,d18
1251         vsli.64         d26,d18,#23
1252 #if 18<16 && defined(__ARMEL__)
1253         vrev64.8        ,
1254 #endif
1255         veor            d25,d24
1256         vbsl            d29,d19,d20             @ Ch(e,f,g)
1257         vshr.u64        d24,d22,#28
1258         veor            d26,d25                 @ Sigma1(e)
1259         vadd.i64        d27,d29,d21
1260         vshr.u64        d25,d22,#34
1261         vsli.64         d24,d22,#36
1262         vadd.i64        d27,d26
1263         vshr.u64        d26,d22,#39
1264         vadd.i64        d28,d2
1265         vsli.64         d25,d22,#30
1266         veor            d30,d22,d23
1267         vsli.64         d26,d22,#25
1268         veor            d21,d24,d25
1269         vadd.i64        d27,d28
1270         vbsl            d30,d16,d23             @ Maj(a,b,c)
1271         veor            d21,d26                 @ Sigma0(a)
1272         vadd.i64        d17,d27
1273         vadd.i64        d30,d27
1274         @ vadd.i64      d21,d30
1275         vshr.u64        d24,d17,#14     @ 19
1276 #if 19<16
1277         vld1.64         {d3},[r1]!      @ handles unaligned
1278 #endif
1279         vshr.u64        d25,d17,#18
1280 #if 19>0
1281          vadd.i64       d21,d30                 @ h+=Maj from the past
1282 #endif
1283         vshr.u64        d26,d17,#41
1284         vld1.64         {d28},[r3,:64]! @ K[i++]
1285         vsli.64         d24,d17,#50
1286         vsli.64         d25,d17,#46
1287         vmov            d29,d17
1288         vsli.64         d26,d17,#23
1289 #if 19<16 && defined(__ARMEL__)
1290         vrev64.8        ,
1291 #endif
1292         veor            d25,d24
1293         vbsl            d29,d18,d19             @ Ch(e,f,g)
1294         vshr.u64        d24,d21,#28
1295         veor            d26,d25                 @ Sigma1(e)
1296         vadd.i64        d27,d29,d20
1297         vshr.u64        d25,d21,#34
1298         vsli.64         d24,d21,#36
1299         vadd.i64        d27,d26
1300         vshr.u64        d26,d21,#39
1301         vadd.i64        d28,d3
1302         vsli.64         d25,d21,#30
1303         veor            d30,d21,d22
1304         vsli.64         d26,d21,#25
1305         veor            d20,d24,d25
1306         vadd.i64        d27,d28
1307         vbsl            d30,d23,d22             @ Maj(a,b,c)
1308         veor            d20,d26                 @ Sigma0(a)
1309         vadd.i64        d16,d27
1310         vadd.i64        d30,d27
1311         @ vadd.i64      d20,d30
1312         vshr.u64        q12,q1,#19
1313         vshr.u64        q13,q1,#61
1314          vadd.i64       d20,d30                 @ h+=Maj from the past
1315         vshr.u64        q15,q1,#6
1316         vsli.64         q12,q1,#45
1317         vext.8          q14,q2,q3,#8    @ X[i+1]
1318         vsli.64         q13,q1,#3
1319         veor            q15,q12
1320         vshr.u64        q12,q14,#1
1321         veor            q15,q13                         @ sigma1(X[i+14])
1322         vshr.u64        q13,q14,#8
1323         vadd.i64        q2,q15
1324         vshr.u64        q15,q14,#7
1325         vsli.64         q12,q14,#63
1326         vsli.64         q13,q14,#56
1327         vext.8          q14,q6,q7,#8    @ X[i+9]
1328         veor            q15,q12
1329         vshr.u64        d24,d16,#14             @ from NEON_00_15
1330         vadd.i64        q2,q14
1331         vshr.u64        d25,d16,#18             @ from NEON_00_15
1332         veor            q15,q13                         @ sigma0(X[i+1])
1333         vshr.u64        d26,d16,#41             @ from NEON_00_15
1334         vadd.i64        q2,q15
1335         vld1.64         {d28},[r3,:64]! @ K[i++]
1336         vsli.64         d24,d16,#50
1337         vsli.64         d25,d16,#46
1338         vmov            d29,d16
1339         vsli.64         d26,d16,#23
1340 #if 20<16 && defined(__ARMEL__)
1341         vrev64.8        ,
1342 #endif
1343         veor            d25,d24
1344         vbsl            d29,d17,d18             @ Ch(e,f,g)
1345         vshr.u64        d24,d20,#28
1346         veor            d26,d25                 @ Sigma1(e)
1347         vadd.i64        d27,d29,d19
1348         vshr.u64        d25,d20,#34
1349         vsli.64         d24,d20,#36
1350         vadd.i64        d27,d26
1351         vshr.u64        d26,d20,#39
1352         vadd.i64        d28,d4
1353         vsli.64         d25,d20,#30
1354         veor            d30,d20,d21
1355         vsli.64         d26,d20,#25
1356         veor            d19,d24,d25
1357         vadd.i64        d27,d28
1358         vbsl            d30,d22,d21             @ Maj(a,b,c)
1359         veor            d19,d26                 @ Sigma0(a)
1360         vadd.i64        d23,d27
1361         vadd.i64        d30,d27
1362         @ vadd.i64      d19,d30
1363         vshr.u64        d24,d23,#14     @ 21
1364 #if 21<16
1365         vld1.64         {d5},[r1]!      @ handles unaligned
1366 #endif
1367         vshr.u64        d25,d23,#18
1368 #if 21>0
1369          vadd.i64       d19,d30                 @ h+=Maj from the past
1370 #endif
1371         vshr.u64        d26,d23,#41
1372         vld1.64         {d28},[r3,:64]! @ K[i++]
1373         vsli.64         d24,d23,#50
1374         vsli.64         d25,d23,#46
1375         vmov            d29,d23
1376         vsli.64         d26,d23,#23
1377 #if 21<16 && defined(__ARMEL__)
1378         vrev64.8        ,
1379 #endif
1380         veor            d25,d24
1381         vbsl            d29,d16,d17             @ Ch(e,f,g)
1382         vshr.u64        d24,d19,#28
1383         veor            d26,d25                 @ Sigma1(e)
1384         vadd.i64        d27,d29,d18
1385         vshr.u64        d25,d19,#34
1386         vsli.64         d24,d19,#36
1387         vadd.i64        d27,d26
1388         vshr.u64        d26,d19,#39
1389         vadd.i64        d28,d5
1390         vsli.64         d25,d19,#30
1391         veor            d30,d19,d20
1392         vsli.64         d26,d19,#25
1393         veor            d18,d24,d25
1394         vadd.i64        d27,d28
1395         vbsl            d30,d21,d20             @ Maj(a,b,c)
1396         veor            d18,d26                 @ Sigma0(a)
1397         vadd.i64        d22,d27
1398         vadd.i64        d30,d27
1399         @ vadd.i64      d18,d30
1400         vshr.u64        q12,q2,#19
1401         vshr.u64        q13,q2,#61
1402          vadd.i64       d18,d30                 @ h+=Maj from the past
1403         vshr.u64        q15,q2,#6
1404         vsli.64         q12,q2,#45
1405         vext.8          q14,q3,q4,#8    @ X[i+1]
1406         vsli.64         q13,q2,#3
1407         veor            q15,q12
1408         vshr.u64        q12,q14,#1
1409         veor            q15,q13                         @ sigma1(X[i+14])
1410         vshr.u64        q13,q14,#8
1411         vadd.i64        q3,q15
1412         vshr.u64        q15,q14,#7
1413         vsli.64         q12,q14,#63
1414         vsli.64         q13,q14,#56
1415         vext.8          q14,q7,q0,#8    @ X[i+9]
1416         veor            q15,q12
1417         vshr.u64        d24,d22,#14             @ from NEON_00_15
1418         vadd.i64        q3,q14
1419         vshr.u64        d25,d22,#18             @ from NEON_00_15
1420         veor            q15,q13                         @ sigma0(X[i+1])
1421         vshr.u64        d26,d22,#41             @ from NEON_00_15
1422         vadd.i64        q3,q15
1423         vld1.64         {d28},[r3,:64]! @ K[i++]
1424         vsli.64         d24,d22,#50
1425         vsli.64         d25,d22,#46
1426         vmov            d29,d22
1427         vsli.64         d26,d22,#23
1428 #if 22<16 && defined(__ARMEL__)
1429         vrev64.8        ,
1430 #endif
1431         veor            d25,d24
1432         vbsl            d29,d23,d16             @ Ch(e,f,g)
1433         vshr.u64        d24,d18,#28
1434         veor            d26,d25                 @ Sigma1(e)
1435         vadd.i64        d27,d29,d17
1436         vshr.u64        d25,d18,#34
1437         vsli.64         d24,d18,#36
1438         vadd.i64        d27,d26
1439         vshr.u64        d26,d18,#39
1440         vadd.i64        d28,d6
1441         vsli.64         d25,d18,#30
1442         veor            d30,d18,d19
1443         vsli.64         d26,d18,#25
1444         veor            d17,d24,d25
1445         vadd.i64        d27,d28
1446         vbsl            d30,d20,d19             @ Maj(a,b,c)
1447         veor            d17,d26                 @ Sigma0(a)
1448         vadd.i64        d21,d27
1449         vadd.i64        d30,d27
1450         @ vadd.i64      d17,d30
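@ Editorial note: the commented-out vadd above is intentional.  Each round
@ leaves Maj(a,b,c)+T1 in d30 and defers the final addition into the new
@ 'a' (which so far holds only Sigma0(a)) until the next round, where it
@ shows up as the interleaved "h+=Maj from the past" instruction; the
@ leftover add after the last round is flushed in the epilogue, just
@ before the context is stored.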
1451         vshr.u64        d24,d21,#14     @ 23
1452 #if 23<16
1453         vld1.64         {d7},[r1]!      @ handles unaligned
1454 #endif
1455         vshr.u64        d25,d21,#18
1456 #if 23>0
1457          vadd.i64       d17,d30                 @ h+=Maj from the past
1458 #endif
1459         vshr.u64        d26,d21,#41
1460         vld1.64         {d28},[r3,:64]! @ K[i++]
1461         vsli.64         d24,d21,#50
1462         vsli.64         d25,d21,#46
1463         vmov            d29,d21
1464         vsli.64         d26,d21,#23
1465 #if 23<16 && defined(__ARMEL__)
1466         vrev64.8        d7,d7
1467 #endif
1468         veor            d25,d24
1469         vbsl            d29,d22,d23             @ Ch(e,f,g)
1470         vshr.u64        d24,d17,#28
1471         veor            d26,d25                 @ Sigma1(e)
1472         vadd.i64        d27,d29,d16
1473         vshr.u64        d25,d17,#34
1474         vsli.64         d24,d17,#36
1475         vadd.i64        d27,d26
1476         vshr.u64        d26,d17,#39
1477         vadd.i64        d28,d7
1478         vsli.64         d25,d17,#30
1479         veor            d30,d17,d18
1480         vsli.64         d26,d17,#25
1481         veor            d16,d24,d25
1482         vadd.i64        d27,d28
1483         vbsl            d30,d19,d18             @ Maj(a,b,c)
1484         veor            d16,d26                 @ Sigma0(a)
1485         vadd.i64        d20,d27
1486         vadd.i64        d30,d27
1487         @ vadd.i64      d16,d30
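@ Editorial note: Ch and Maj are each evaluated with a single vbsl
@ (bitwise select) instead of explicit AND/XOR chains:
@       Ch(e,f,g)  = (e & f) | (~e & g)     -> vmov mask=e;   vbsl mask,f,g
@       Maj(a,b,c) = Ch(a^b, c, b)          -> veor mask=a^b; vbsl mask,c,b
@ matching the vmov/veor setup and the vbsl lines marked Ch(e,f,g) and
@ Maj(a,b,c) in every round of this file.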
1488         vshr.u64        q12,q3,#19
1489         vshr.u64        q13,q3,#61
1490          vadd.i64       d16,d30                 @ h+=Maj from the past
1491         vshr.u64        q15,q3,#6
1492         vsli.64         q12,q3,#45
1493         vext.8          q14,q4,q5,#8    @ X[i+1]
1494         vsli.64         q13,q3,#3
1495         veor            q15,q12
1496         vshr.u64        q12,q14,#1
1497         veor            q15,q13                         @ sigma1(X[i+14])
1498         vshr.u64        q13,q14,#8
1499         vadd.i64        q4,q15
1500         vshr.u64        q15,q14,#7
1501         vsli.64         q12,q14,#63
1502         vsli.64         q13,q14,#56
1503         vext.8          q14,q0,q1,#8    @ X[i+9]
1504         veor            q15,q12
1505         vshr.u64        d24,d20,#14             @ from NEON_00_15
1506         vadd.i64        q4,q14
1507         vshr.u64        d25,d20,#18             @ from NEON_00_15
1508         veor            q15,q13                         @ sigma0(X[i+1])
1509         vshr.u64        d26,d20,#41             @ from NEON_00_15
1510         vadd.i64        q4,q15
1511         vld1.64         {d28},[r3,:64]! @ K[i++]
1512         vsli.64         d24,d20,#50
1513         vsli.64         d25,d20,#46
1514         vmov            d29,d20
1515         vsli.64         d26,d20,#23
1516 #if 24<16 && defined(__ARMEL__)
1517         vrev64.8        d8,d8
1518 #endif
1519         veor            d25,d24
1520         vbsl            d29,d21,d22             @ Ch(e,f,g)
1521         vshr.u64        d24,d16,#28
1522         veor            d26,d25                 @ Sigma1(e)
1523         vadd.i64        d27,d29,d23
1524         vshr.u64        d25,d16,#34
1525         vsli.64         d24,d16,#36
1526         vadd.i64        d27,d26
1527         vshr.u64        d26,d16,#39
1528         vadd.i64        d28,d8
1529         vsli.64         d25,d16,#30
1530         veor            d30,d16,d17
1531         vsli.64         d26,d16,#25
1532         veor            d23,d24,d25
1533         vadd.i64        d27,d28
1534         vbsl            d30,d18,d17             @ Maj(a,b,c)
1535         veor            d23,d26                 @ Sigma0(a)
1536         vadd.i64        d19,d27
1537         vadd.i64        d30,d27
1538         @ vadd.i64      d23,d30
1539         vshr.u64        d24,d19,#14     @ 25
1540 #if 25<16
1541         vld1.64         {d9},[r1]!      @ handles unaligned
1542 #endif
1543         vshr.u64        d25,d19,#18
1544 #if 25>0
1545          vadd.i64       d23,d30                 @ h+=Maj from the past
1546 #endif
1547         vshr.u64        d26,d19,#41
1548         vld1.64         {d28},[r3,:64]! @ K[i++]
1549         vsli.64         d24,d19,#50
1550         vsli.64         d25,d19,#46
1551         vmov            d29,d19
1552         vsli.64         d26,d19,#23
1553 #if 25<16 && defined(__ARMEL__)
1554         vrev64.8        d9,d9
1555 #endif
1556         veor            d25,d24
1557         vbsl            d29,d20,d21             @ Ch(e,f,g)
1558         vshr.u64        d24,d23,#28
1559         veor            d26,d25                 @ Sigma1(e)
1560         vadd.i64        d27,d29,d22
1561         vshr.u64        d25,d23,#34
1562         vsli.64         d24,d23,#36
1563         vadd.i64        d27,d26
1564         vshr.u64        d26,d23,#39
1565         vadd.i64        d28,d9
1566         vsli.64         d25,d23,#30
1567         veor            d30,d23,d16
1568         vsli.64         d26,d23,#25
1569         veor            d22,d24,d25
1570         vadd.i64        d27,d28
1571         vbsl            d30,d17,d16             @ Maj(a,b,c)
1572         veor            d22,d26                 @ Sigma0(a)
1573         vadd.i64        d18,d27
1574         vadd.i64        d30,d27
1575         @ vadd.i64      d22,d30
1576         vshr.u64        q12,q4,#19
1577         vshr.u64        q13,q4,#61
1578          vadd.i64       d22,d30                 @ h+=Maj from the past
1579         vshr.u64        q15,q4,#6
1580         vsli.64         q12,q4,#45
1581         vext.8          q14,q5,q6,#8    @ X[i+1]
1582         vsli.64         q13,q4,#3
1583         veor            q15,q12
1584         vshr.u64        q12,q14,#1
1585         veor            q15,q13                         @ sigma1(X[i+14])
1586         vshr.u64        q13,q14,#8
1587         vadd.i64        q5,q15
1588         vshr.u64        q15,q14,#7
1589         vsli.64         q12,q14,#63
1590         vsli.64         q13,q14,#56
1591         vext.8          q14,q1,q2,#8    @ X[i+9]
1592         veor            q15,q12
1593         vshr.u64        d24,d18,#14             @ from NEON_00_15
1594         vadd.i64        q5,q14
1595         vshr.u64        d25,d18,#18             @ from NEON_00_15
1596         veor            q15,q13                         @ sigma0(X[i+1])
1597         vshr.u64        d26,d18,#41             @ from NEON_00_15
1598         vadd.i64        q5,q15
1599         vld1.64         {d28},[r3,:64]! @ K[i++]
1600         vsli.64         d24,d18,#50
1601         vsli.64         d25,d18,#46
1602         vmov            d29,d18
1603         vsli.64         d26,d18,#23
1604 #if 26<16 && defined(__ARMEL__)
1605         vrev64.8        d10,d10
1606 #endif
1607         veor            d25,d24
1608         vbsl            d29,d19,d20             @ Ch(e,f,g)
1609         vshr.u64        d24,d22,#28
1610         veor            d26,d25                 @ Sigma1(e)
1611         vadd.i64        d27,d29,d21
1612         vshr.u64        d25,d22,#34
1613         vsli.64         d24,d22,#36
1614         vadd.i64        d27,d26
1615         vshr.u64        d26,d22,#39
1616         vadd.i64        d28,d10
1617         vsli.64         d25,d22,#30
1618         veor            d30,d22,d23
1619         vsli.64         d26,d22,#25
1620         veor            d21,d24,d25
1621         vadd.i64        d27,d28
1622         vbsl            d30,d16,d23             @ Maj(a,b,c)
1623         veor            d21,d26                 @ Sigma0(a)
1624         vadd.i64        d17,d27
1625         vadd.i64        d30,d27
1626         @ vadd.i64      d21,d30
1627         vshr.u64        d24,d17,#14     @ 27
1628 #if 27<16
1629         vld1.64         {d11},[r1]!     @ handles unaligned
1630 #endif
1631         vshr.u64        d25,d17,#18
1632 #if 27>0
1633          vadd.i64       d21,d30                 @ h+=Maj from the past
1634 #endif
1635         vshr.u64        d26,d17,#41
1636         vld1.64         {d28},[r3,:64]! @ K[i++]
1637         vsli.64         d24,d17,#50
1638         vsli.64         d25,d17,#46
1639         vmov            d29,d17
1640         vsli.64         d26,d17,#23
1641 #if 27<16 && defined(__ARMEL__)
1642         vrev64.8        d11,d11
1643 #endif
1644         veor            d25,d24
1645         vbsl            d29,d18,d19             @ Ch(e,f,g)
1646         vshr.u64        d24,d21,#28
1647         veor            d26,d25                 @ Sigma1(e)
1648         vadd.i64        d27,d29,d20
1649         vshr.u64        d25,d21,#34
1650         vsli.64         d24,d21,#36
1651         vadd.i64        d27,d26
1652         vshr.u64        d26,d21,#39
1653         vadd.i64        d28,d11
1654         vsli.64         d25,d21,#30
1655         veor            d30,d21,d22
1656         vsli.64         d26,d21,#25
1657         veor            d20,d24,d25
1658         vadd.i64        d27,d28
1659         vbsl            d30,d23,d22             @ Maj(a,b,c)
1660         veor            d20,d26                 @ Sigma0(a)
1661         vadd.i64        d16,d27
1662         vadd.i64        d30,d27
1663         @ vadd.i64      d20,d30
1664         vshr.u64        q12,q5,#19
1665         vshr.u64        q13,q5,#61
1666          vadd.i64       d20,d30                 @ h+=Maj from the past
1667         vshr.u64        q15,q5,#6
1668         vsli.64         q12,q5,#45
1669         vext.8          q14,q6,q7,#8    @ X[i+1]
1670         vsli.64         q13,q5,#3
1671         veor            q15,q12
1672         vshr.u64        q12,q14,#1
1673         veor            q15,q13                         @ sigma1(X[i+14])
1674         vshr.u64        q13,q14,#8
1675         vadd.i64        q6,q15
1676         vshr.u64        q15,q14,#7
1677         vsli.64         q12,q14,#63
1678         vsli.64         q13,q14,#56
1679         vext.8          q14,q2,q3,#8    @ X[i+9]
1680         veor            q15,q12
1681         vshr.u64        d24,d16,#14             @ from NEON_00_15
1682         vadd.i64        q6,q14
1683         vshr.u64        d25,d16,#18             @ from NEON_00_15
1684         veor            q15,q13                         @ sigma0(X[i+1])
1685         vshr.u64        d26,d16,#41             @ from NEON_00_15
1686         vadd.i64        q6,q15
1687         vld1.64         {d28},[r3,:64]! @ K[i++]
1688         vsli.64         d24,d16,#50
1689         vsli.64         d25,d16,#46
1690         vmov            d29,d16
1691         vsli.64         d26,d16,#23
1692 #if 28<16 && defined(__ARMEL__)
1693         vrev64.8        d12,d12
1694 #endif
1695         veor            d25,d24
1696         vbsl            d29,d17,d18             @ Ch(e,f,g)
1697         vshr.u64        d24,d20,#28
1698         veor            d26,d25                 @ Sigma1(e)
1699         vadd.i64        d27,d29,d19
1700         vshr.u64        d25,d20,#34
1701         vsli.64         d24,d20,#36
1702         vadd.i64        d27,d26
1703         vshr.u64        d26,d20,#39
1704         vadd.i64        d28,d12
1705         vsli.64         d25,d20,#30
1706         veor            d30,d20,d21
1707         vsli.64         d26,d20,#25
1708         veor            d19,d24,d25
1709         vadd.i64        d27,d28
1710         vbsl            d30,d22,d21             @ Maj(a,b,c)
1711         veor            d19,d26                 @ Sigma0(a)
1712         vadd.i64        d23,d27
1713         vadd.i64        d30,d27
1714         @ vadd.i64      d19,d30
1715         vshr.u64        d24,d23,#14     @ 29
1716 #if 29<16
1717         vld1.64         {d13},[r1]!     @ handles unaligned
1718 #endif
1719         vshr.u64        d25,d23,#18
1720 #if 29>0
1721          vadd.i64       d19,d30                 @ h+=Maj from the past
1722 #endif
1723         vshr.u64        d26,d23,#41
1724         vld1.64         {d28},[r3,:64]! @ K[i++]
1725         vsli.64         d24,d23,#50
1726         vsli.64         d25,d23,#46
1727         vmov            d29,d23
1728         vsli.64         d26,d23,#23
1729 #if 29<16 && defined(__ARMEL__)
1730         vrev64.8        d13,d13
1731 #endif
1732         veor            d25,d24
1733         vbsl            d29,d16,d17             @ Ch(e,f,g)
1734         vshr.u64        d24,d19,#28
1735         veor            d26,d25                 @ Sigma1(e)
1736         vadd.i64        d27,d29,d18
1737         vshr.u64        d25,d19,#34
1738         vsli.64         d24,d19,#36
1739         vadd.i64        d27,d26
1740         vshr.u64        d26,d19,#39
1741         vadd.i64        d28,d13
1742         vsli.64         d25,d19,#30
1743         veor            d30,d19,d20
1744         vsli.64         d26,d19,#25
1745         veor            d18,d24,d25
1746         vadd.i64        d27,d28
1747         vbsl            d30,d21,d20             @ Maj(a,b,c)
1748         veor            d18,d26                 @ Sigma0(a)
1749         vadd.i64        d22,d27
1750         vadd.i64        d30,d27
1751         @ vadd.i64      d18,d30
1752         vshr.u64        q12,q6,#19
1753         vshr.u64        q13,q6,#61
1754          vadd.i64       d18,d30                 @ h+=Maj from the past
1755         vshr.u64        q15,q6,#6
1756         vsli.64         q12,q6,#45
1757         vext.8          q14,q7,q0,#8    @ X[i+1]
1758         vsli.64         q13,q6,#3
1759         veor            q15,q12
1760         vshr.u64        q12,q14,#1
1761         veor            q15,q13                         @ sigma1(X[i+14])
1762         vshr.u64        q13,q14,#8
1763         vadd.i64        q7,q15
1764         vshr.u64        q15,q14,#7
1765         vsli.64         q12,q14,#63
1766         vsli.64         q13,q14,#56
1767         vext.8          q14,q3,q4,#8    @ X[i+9]
1768         veor            q15,q12
1769         vshr.u64        d24,d22,#14             @ from NEON_00_15
1770         vadd.i64        q7,q14
1771         vshr.u64        d25,d22,#18             @ from NEON_00_15
1772         veor            q15,q13                         @ sigma0(X[i+1])
1773         vshr.u64        d26,d22,#41             @ from NEON_00_15
1774         vadd.i64        q7,q15
1775         vld1.64         {d28},[r3,:64]! @ K[i++]
1776         vsli.64         d24,d22,#50
1777         vsli.64         d25,d22,#46
1778         vmov            d29,d22
1779         vsli.64         d26,d22,#23
1780 #if 30<16 && defined(__ARMEL__)
1781         vrev64.8        d14,d14
1782 #endif
1783         veor            d25,d24
1784         vbsl            d29,d23,d16             @ Ch(e,f,g)
1785         vshr.u64        d24,d18,#28
1786         veor            d26,d25                 @ Sigma1(e)
1787         vadd.i64        d27,d29,d17
1788         vshr.u64        d25,d18,#34
1789         vsli.64         d24,d18,#36
1790         vadd.i64        d27,d26
1791         vshr.u64        d26,d18,#39
1792         vadd.i64        d28,d14
1793         vsli.64         d25,d18,#30
1794         veor            d30,d18,d19
1795         vsli.64         d26,d18,#25
1796         veor            d17,d24,d25
1797         vadd.i64        d27,d28
1798         vbsl            d30,d20,d19             @ Maj(a,b,c)
1799         veor            d17,d26                 @ Sigma0(a)
1800         vadd.i64        d21,d27
1801         vadd.i64        d30,d27
1802         @ vadd.i64      d17,d30
1803         vshr.u64        d24,d21,#14     @ 31
1804 #if 31<16
1805         vld1.64         {d15},[r1]!     @ handles unaligned
1806 #endif
1807         vshr.u64        d25,d21,#18
1808 #if 31>0
1809          vadd.i64       d17,d30                 @ h+=Maj from the past
1810 #endif
1811         vshr.u64        d26,d21,#41
1812         vld1.64         {d28},[r3,:64]! @ K[i++]
1813         vsli.64         d24,d21,#50
1814         vsli.64         d25,d21,#46
1815         vmov            d29,d21
1816         vsli.64         d26,d21,#23
1817 #if 31<16 && defined(__ARMEL__)
1818         vrev64.8        d15,d15
1819 #endif
1820         veor            d25,d24
1821         vbsl            d29,d22,d23             @ Ch(e,f,g)
1822         vshr.u64        d24,d17,#28
1823         veor            d26,d25                 @ Sigma1(e)
1824         vadd.i64        d27,d29,d16
1825         vshr.u64        d25,d17,#34
1826         vsli.64         d24,d17,#36
1827         vadd.i64        d27,d26
1828         vshr.u64        d26,d17,#39
1829         vadd.i64        d28,d15
1830         vsli.64         d25,d17,#30
1831         veor            d30,d17,d18
1832         vsli.64         d26,d17,#25
1833         veor            d16,d24,d25
1834         vadd.i64        d27,d28
1835         vbsl            d30,d19,d18             @ Maj(a,b,c)
1836         veor            d16,d26                 @ Sigma0(a)
1837         vadd.i64        d20,d27
1838         vadd.i64        d30,d27
1839         @ vadd.i64      d16,d30
1840         bne             .L16_79_neon
1841
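@ Editorial note on the epilogue below: the last deferred "h+=Maj" is
@ flushed, the previous hash value is reloaded from [r0] into d24-d31 and
@ added into the working variables held in q8-q11 (d16-d23), two 64-bit
@ lanes per vadd.i64, and the updated state is stored back to [r0].
@ r1 (input pointer) is then compared with r2 (end of data) and r3 is
@ stepped back by 640 bytes (80 entries x 8 bytes) to the start of K512
@ before looping over the next block.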
1842          vadd.i64       d16,d30         @ h+=Maj from the past
1843         vldmia          r0,{d24-d31}    @ load context to temp
1844         vadd.i64        q8,q12          @ vectorized accumulate
1845         vadd.i64        q9,q13
1846         vadd.i64        q10,q14
1847         vadd.i64        q11,q15
1848         vstmia          r0,{d16-d23}    @ save context
1849         teq             r1,r2
1850         sub             r3,#640 @ rewind K512
1851         bne             .Loop_neon
1852
1853         VFP_ABI_POP
1854         bx      lr                              @ .word 0xe12fff1e
1855 .size   sha512_block_data_order_neon,.-sha512_block_data_order_neon
1856 #endif
1857 .asciz  "SHA512 block transform for ARMv4/NEON, CRYPTOGAMS by <appro@openssl.org>"
1858 .align  2
1859 #if __ARM_MAX_ARCH__>=7 && !defined(__KERNEL__)
1860 .comm   OPENSSL_armcap_P,4,4
1861 #endif