1 /***********************************************************************
3 ** Implementation of the Skein block functions.
5 ** Source code author: Doug Whiting, 2008.
7 ** This algorithm and source code is released to the public domain.
9 ** Compile-time switches:
11 ** SKEIN_USE_ASM -- set bits (256/512/1024) to select which
12 ** versions use ASM code for block processing
13 ** [default: use C for all block sizes]
15 ************************************************************************/
17 #include <linux/string.h>
18 #include "skein_base.h"
19 #include "skein_block.h"
/*
 * Build-time configuration and round macros for the Skein-256 block
 * function.
 * NOTE(review): this listing is an elided copy -- several continuation
 * lines and the #ifdef/#else/#endif pairs around the alternatives below
 * are missing from this view, so the macro bodies here are incomplete.
 * Comments describe only what the visible lines establish.
 */
22 #define SKEIN_USE_ASM (0) /* default is all C code (no ASM) */
26 #define SKEIN_LOOP 001 /* default: unroll 256 and 512, but not 1024 */
29 #define BLK_BITS (WCNT * 64) /* some useful definitions for code here */
/* offsets of the tweak and key words inside the local kw[] schedule array */
30 #define KW_TWK_BASE (0)
31 #define KW_KEY_BASE (3)
32 #define ks (kw + KW_KEY_BASE)
33 #define ts (kw + KW_TWK_BASE)
/* debug builds save the tweak words into ctx; otherwise it is a no-op */
36 #define debug_save_tweak(ctx) \
38 ctx->h.tweak[0] = ts[0]; \
39 ctx->h.tweak[1] = ts[1]; \
42 #define debug_save_tweak(ctx)
/* ---------------- Skein-256 round / unroll configuration ---------------- */
45 #if !(SKEIN_USE_ASM & 256)
47 #define RCNT (SKEIN_256_ROUNDS_TOTAL / 8)
48 #ifdef SKEIN_LOOP /* configure how much to unroll the loop */
49 #define SKEIN_UNROLL_256 (((SKEIN_LOOP) / 100) % 10)
51 #define SKEIN_UNROLL_256 (0)
/* the unroll count must evenly divide the number of 8-round groups */
55 #if (RCNT % SKEIN_UNROLL_256)
56 #error "Invalid SKEIN_UNROLL_256" /* sanity check on unroll count */
/* one MIX step on the four 64-bit state words (add, rotate; xor elided) */
59 #define ROUND256(p0, p1, p2, p3, ROT, r_num) \
62 X##p1 = rotl_64(X##p1, ROT##_0); \
65 X##p3 = rotl_64(X##p3, ROT##_1); \
69 #if SKEIN_UNROLL_256 == 0
70 #define R256(p0, p1, p2, p3, ROT, r_num) /* fully unrolled */ \
71 ROUND256(p0, p1, p2, p3, ROT, r_num)
/* fully-unrolled key injection: schedule indices taken mod 5 / mod 3 */
75 /* inject the key schedule value */ \
76 X0 += ks[((R) + 1) % 5]; \
77 X1 += ks[((R) + 2) % 5] + ts[((R) + 1) % 3]; \
78 X2 += ks[((R) + 3) % 5] + ts[((R) + 2) % 3]; \
79 X3 += ks[((R) + 4) % 5] + (R) + 1; \
83 #define R256(p0, p1, p2, p3, ROT, r_num) ROUND256(p0, p1, p2, p3, ROT, r_num)
/* looping variant: key/tweak schedules are rotated in place each pass */
87 /* inject the key schedule value */ \
88 X0 += ks[r + (R) + 0]; \
89 X1 += ks[r + (R) + 1] + ts[r + (R) + 0];\
90 X2 += ks[r + (R) + 2] + ts[r + (R) + 1];\
91 X3 += ks[r + (R) + 3] + r + (R); \
92 /* rotate key schedule */ \
93 ks[r + (R) + 4] = ks[r + (R) - 1]; \
94 ts[r + (R) + 2] = ts[r + (R) - 1]; \
97 #define R256_8_ROUNDS(R) \
99 R256(0, 1, 2, 3, R_256_0, 8 * (R) + 1); \
100 R256(0, 3, 2, 1, R_256_1, 8 * (R) + 2); \
101 R256(0, 1, 2, 3, R_256_2, 8 * (R) + 3); \
102 R256(0, 3, 2, 1, R_256_3, 8 * (R) + 4); \
104 R256(0, 1, 2, 3, R_256_4, 8 * (R) + 5); \
105 R256(0, 3, 2, 1, R_256_5, 8 * (R) + 6); \
106 R256(0, 1, 2, 3, R_256_6, 8 * (R) + 7); \
107 R256(0, 3, 2, 1, R_256_7, 8 * (R) + 8); \
111 #define R256_UNROLL_R(NN) \
112 ((SKEIN_UNROLL_256 == 0 && \
113 SKEIN_256_ROUNDS_TOTAL / 8 > (NN)) || \
114 (SKEIN_UNROLL_256 > (NN)))
/* at most 14 8-round groups are emitted below; larger unrolls won't build */
116 #if (SKEIN_UNROLL_256 > 14)
117 #error "need more unrolling in skein_256_process_block"
/* ---------------- Skein-512 round / unroll configuration ----------------
 * Same structure as the 256-bit section above, widened to eight state
 * words; key schedule indices are taken mod 9 in the unrolled variant.
 * NOTE(review): continuation lines are missing from this view.
 */
121 #if !(SKEIN_USE_ASM & 512)
123 #define RCNT (SKEIN_512_ROUNDS_TOTAL/8)
125 #ifdef SKEIN_LOOP /* configure how much to unroll the loop */
126 #define SKEIN_UNROLL_512 (((SKEIN_LOOP)/10)%10)
128 #define SKEIN_UNROLL_512 (0)
/* the unroll count must evenly divide the number of 8-round groups */
132 #if (RCNT % SKEIN_UNROLL_512)
133 #error "Invalid SKEIN_UNROLL_512" /* sanity check on unroll count */
/* one MIX step across the four word pairs (add, rotate; xor elided) */
136 #define ROUND512(p0, p1, p2, p3, p4, p5, p6, p7, ROT, r_num) \
139 X##p1 = rotl_64(X##p1, ROT##_0); \
142 X##p3 = rotl_64(X##p3, ROT##_1); \
145 X##p5 = rotl_64(X##p5, ROT##_2); \
147 X##p6 += X##p7; X##p7 = rotl_64(X##p7, ROT##_3);\
151 #if SKEIN_UNROLL_512 == 0
152 #define R512(p0, p1, p2, p3, p4, p5, p6, p7, ROT, r_num) /* unrolled */ \
153 ROUND512(p0, p1, p2, p3, p4, p5, p6, p7, ROT, r_num)
/* fully-unrolled key injection: schedule indices taken mod 9 / mod 3 */
157 /* inject the key schedule value */ \
158 X0 += ks[((R) + 1) % 9]; \
159 X1 += ks[((R) + 2) % 9]; \
160 X2 += ks[((R) + 3) % 9]; \
161 X3 += ks[((R) + 4) % 9]; \
162 X4 += ks[((R) + 5) % 9]; \
163 X5 += ks[((R) + 6) % 9] + ts[((R) + 1) % 3]; \
164 X6 += ks[((R) + 7) % 9] + ts[((R) + 2) % 3]; \
165 X7 += ks[((R) + 8) % 9] + (R) + 1; \
168 #else /* looping version */
/* looping variant: key/tweak schedules are rotated in place each pass */
169 #define R512(p0, p1, p2, p3, p4, p5, p6, p7, ROT, r_num) \
170 ROUND512(p0, p1, p2, p3, p4, p5, p6, p7, ROT, r_num) \
174 /* inject the key schedule value */ \
175 X0 += ks[r + (R) + 0]; \
176 X1 += ks[r + (R) + 1]; \
177 X2 += ks[r + (R) + 2]; \
178 X3 += ks[r + (R) + 3]; \
179 X4 += ks[r + (R) + 4]; \
180 X5 += ks[r + (R) + 5] + ts[r + (R) + 0]; \
181 X6 += ks[r + (R) + 6] + ts[r + (R) + 1]; \
182 X7 += ks[r + (R) + 7] + r + (R); \
183 /* rotate key schedule */ \
184 ks[r + (R) + 8] = ks[r + (R) - 1]; \
185 ts[r + (R) + 2] = ts[r + (R) - 1]; \
187 #endif /* end of looped code definitions */
/* eight rounds cycle through all four word permutations twice */
188 #define R512_8_ROUNDS(R) /* do 8 full rounds */ \
190 R512(0, 1, 2, 3, 4, 5, 6, 7, R_512_0, 8 * (R) + 1); \
191 R512(2, 1, 4, 7, 6, 5, 0, 3, R_512_1, 8 * (R) + 2); \
192 R512(4, 1, 6, 3, 0, 5, 2, 7, R_512_2, 8 * (R) + 3); \
193 R512(6, 1, 0, 7, 2, 5, 4, 3, R_512_3, 8 * (R) + 4); \
195 R512(0, 1, 2, 3, 4, 5, 6, 7, R_512_4, 8 * (R) + 5); \
196 R512(2, 1, 4, 7, 6, 5, 0, 3, R_512_5, 8 * (R) + 6); \
197 R512(4, 1, 6, 3, 0, 5, 2, 7, R_512_6, 8 * (R) + 7); \
198 R512(6, 1, 0, 7, 2, 5, 4, 3, R_512_7, 8 * (R) + 8); \
199 I512(2 * (R) + 1); /* and key injection */ \
201 #define R512_UNROLL_R(NN) \
202 ((SKEIN_UNROLL_512 == 0 && \
203 SKEIN_512_ROUNDS_TOTAL/8 > (NN)) || \
204 (SKEIN_UNROLL_512 > (NN)))
/* at most 14 8-round groups are emitted below; larger unrolls won't build */
206 #if (SKEIN_UNROLL_512 > 14)
207 #error "need more unrolling in skein_512_process_block"
/* ---------------- Skein-1024 round / unroll configuration ---------------
 * Same structure again, widened to sixteen state words; key schedule
 * indices are taken mod 17 in the unrolled variant.
 * NOTE(review): continuation lines are missing from this view.
 */
211 #if !(SKEIN_USE_ASM & 1024)
213 #define RCNT (SKEIN_1024_ROUNDS_TOTAL/8)
214 #ifdef SKEIN_LOOP /* configure how much to unroll the loop */
215 #define SKEIN_UNROLL_1024 ((SKEIN_LOOP) % 10)
217 #define SKEIN_UNROLL_1024 (0)
/* the unroll count must evenly divide the number of 8-round groups */
220 #if (SKEIN_UNROLL_1024 != 0)
221 #if (RCNT % SKEIN_UNROLL_1024)
222 #error "Invalid SKEIN_UNROLL_1024" /* sanity check on unroll count */
/* one MIX step across the eight word pairs (add, rotate; xor elided) */
225 #define ROUND1024(p0, p1, p2, p3, p4, p5, p6, p7, p8, p9, pA, pB, pC, pD, pE, \
229 X##p1 = rotl_64(X##p1, ROT##_0); \
232 X##p3 = rotl_64(X##p3, ROT##_1); \
235 X##p5 = rotl_64(X##p5, ROT##_2); \
238 X##p7 = rotl_64(X##p7, ROT##_3); \
241 X##p9 = rotl_64(X##p9, ROT##_4); \
244 X##pB = rotl_64(X##pB, ROT##_5); \
247 X##pD = rotl_64(X##pD, ROT##_6); \
250 X##pF = rotl_64(X##pF, ROT##_7); \
254 #if SKEIN_UNROLL_1024 == 0
255 #define R1024(p0, p1, p2, p3, p4, p5, p6, p7, p8, p9, pA, pB, pC, pD, pE, pF, \
257 ROUND1024(p0, p1, p2, p3, p4, p5, p6, p7, p8, p9, pA, pB, pC, pD, pE, \
262 /* inject the key schedule value */ \
263 X00 += ks[((R) + 1) % 17]; \
264 X01 += ks[((R) + 2) % 17]; \
265 X02 += ks[((R) + 3) % 17]; \
266 X03 += ks[((R) + 4) % 17]; \
267 X04 += ks[((R) + 5) % 17]; \
268 X05 += ks[((R) + 6) % 17]; \
269 X06 += ks[((R) + 7) % 17]; \
270 X07 += ks[((R) + 8) % 17]; \
271 X08 += ks[((R) + 9) % 17]; \
272 X09 += ks[((R) + 10) % 17]; \
273 X10 += ks[((R) + 11) % 17]; \
274 X11 += ks[((R) + 12) % 17]; \
275 X12 += ks[((R) + 13) % 17]; \
276 X13 += ks[((R) + 14) % 17] + ts[((R) + 1) % 3]; \
277 X14 += ks[((R) + 15) % 17] + ts[((R) + 2) % 3]; \
278 X15 += ks[((R) + 16) % 17] + (R) + 1; \
280 #else /* looping version */
/* looping variant: key/tweak schedules are rotated in place each pass */
281 #define R1024(p0, p1, p2, p3, p4, p5, p6, p7, p8, p9, pA, pB, pC, pD, pE, pF, \
283 ROUND1024(p0, p1, p2, p3, p4, p5, p6, p7, p8, p9, pA, pB, pC, pD, pE, \
288 /* inject the key schedule value */ \
289 X00 += ks[r + (R) + 0]; \
290 X01 += ks[r + (R) + 1]; \
291 X02 += ks[r + (R) + 2]; \
292 X03 += ks[r + (R) + 3]; \
293 X04 += ks[r + (R) + 4]; \
294 X05 += ks[r + (R) + 5]; \
295 X06 += ks[r + (R) + 6]; \
296 X07 += ks[r + (R) + 7]; \
297 X08 += ks[r + (R) + 8]; \
298 X09 += ks[r + (R) + 9]; \
299 X10 += ks[r + (R) + 10]; \
300 X11 += ks[r + (R) + 11]; \
301 X12 += ks[r + (R) + 12]; \
302 X13 += ks[r + (R) + 13] + ts[r + (R) + 0]; \
303 X14 += ks[r + (R) + 14] + ts[r + (R) + 1]; \
304 X15 += ks[r + (R) + 15] + r + (R); \
305 /* rotate key schedule */ \
306 ks[r + (R) + 16] = ks[r + (R) - 1]; \
307 ts[r + (R) + 2] = ts[r + (R) - 1]; \
311 #define R1024_8_ROUNDS(R) \
313 R1024(00, 01, 02, 03, 04, 05, 06, 07, 08, 09, 10, 11, 12, \
314 13, 14, 15, R1024_0, 8*(R) + 1); \
315 R1024(00, 09, 02, 13, 06, 11, 04, 15, 10, 07, 12, 03, 14, \
316 05, 08, 01, R1024_1, 8*(R) + 2); \
317 R1024(00, 07, 02, 05, 04, 03, 06, 01, 12, 15, 14, 13, 08, \
318 11, 10, 09, R1024_2, 8*(R) + 3); \
319 R1024(00, 15, 02, 11, 06, 13, 04, 09, 14, 01, 08, 05, 10, \
320 03, 12, 07, R1024_3, 8*(R) + 4); \
322 R1024(00, 01, 02, 03, 04, 05, 06, 07, 08, 09, 10, 11, 12, \
323 13, 14, 15, R1024_4, 8*(R) + 5); \
324 R1024(00, 09, 02, 13, 06, 11, 04, 15, 10, 07, 12, 03, 14, \
325 05, 08, 01, R1024_5, 8*(R) + 6); \
326 R1024(00, 07, 02, 05, 04, 03, 06, 01, 12, 15, 14, 13, 08, \
327 11, 10, 09, R1024_6, 8*(R) + 7); \
328 R1024(00, 15, 02, 11, 06, 13, 04, 09, 14, 01, 08, 05, 10, \
329 03, 12, 07, R1024_7, 8*(R) + 8); \
333 #define R1024_UNROLL_R(NN) \
334 ((SKEIN_UNROLL_1024 == 0 && \
335 SKEIN_1024_ROUNDS_TOTAL/8 > (NN)) || \
336 (SKEIN_UNROLL_1024 > (NN)))
/* at most 14 8-round groups are emitted below; larger unrolls won't build */
338 #if (SKEIN_UNROLL_1024 > 14)
339 #error "need more unrolling in Skein_1024_Process_Block"
343 /***************************** SKEIN_256 ******************************/
344 #if !(SKEIN_USE_ASM & 256)
/*
 * skein_256_process_block() - run blk_cnt 32-byte blocks through the
 * Threefish-256 based compression function, updating the chaining
 * variables (ctx->x[]) and the tweak (ctx->h.tweak[]).
 * NOTE(review): this view of the function is missing lines (braces,
 * the ks[0..3] setup, the do/while over blk_cnt, the X0/X3 injection,
 * and the bodies of the unrolled round groups); comments describe only
 * the visible statements.
 */
345 void skein_256_process_block(struct skein_256_ctx *ctx, const u8 *blk_ptr,
346 size_t blk_cnt, size_t byte_cnt_add)
349 WCNT = SKEIN_256_STATE_WORDS
353 /* key schedule: chaining vars + tweak + "rot"*/
354 u64 kw[WCNT+4+RCNT*2];
356 /* key schedule words : chaining vars + tweak */
359 u64 X0, X1, X2, X3; /* local copy of context vars, for speed */
360 u64 w[WCNT]; /* local copy of input block */
362 const u64 *X_ptr[4]; /* use for debugging (help cc put Xn in regs) */
369 skein_assert(blk_cnt != 0); /* never call with blk_cnt == 0! */
/* load tweak words from the context (ts aliases kw[KW_TWK_BASE]) */
370 ts[0] = ctx->h.tweak[0];
371 ts[1] = ctx->h.tweak[1];
374 * this implementation only supports 2**64 input bytes
375 * (no carry out here)
377 ts[0] += byte_cnt_add; /* update processed length */
379 /* precompute the key schedule for this block */
/* parity word: xor of all key words with the Threefish key-parity const */
384 ks[4] = ks[0] ^ ks[1] ^ ks[2] ^ ks[3] ^ SKEIN_KS_PARITY;
386 ts[2] = ts[0] ^ ts[1]; /* third tweak word is the xor of the first two */
388 /* get input block in little-endian format */
389 skein_get64_lsb_first(w, blk_ptr, WCNT);
390 debug_save_tweak(ctx);
392 /* do the first full key injection */
394 X1 = w[1] + ks[1] + ts[0];
395 X2 = w[2] + ks[2] + ts[1];
398 blk_ptr += SKEIN_256_BLOCK_BYTES; /* advance to the next input block */
/* loop bounds/step differ between the looped and unrolled builds */
402 r < (SKEIN_UNROLL_256 ? 2 * RCNT : 2);
403 r += (SKEIN_UNROLL_256 ? 2 * SKEIN_UNROLL_256 : 1)) {
/* 8-round groups 10..14 are compiled in only if the unroll needs them */
432 #if R256_UNROLL_R(10)
435 #if R256_UNROLL_R(11)
438 #if R256_UNROLL_R(12)
441 #if R256_UNROLL_R(13)
444 #if R256_UNROLL_R(14)
448 /* do the final "feedforward" xor, update context chaining */
449 ctx->x[0] = X0 ^ w[0];
450 ctx->x[1] = X1 ^ w[1];
451 ctx->x[2] = X2 ^ w[2];
452 ctx->x[3] = X3 ^ w[3];
/* FIRST flag applies only to the first block of a message */
454 ts[1] &= ~SKEIN_T1_FLAG_FIRST;
/* write the updated tweak back into the context */
456 ctx->h.tweak[0] = ts[0];
457 ctx->h.tweak[1] = ts[1];
460 #if defined(SKEIN_CODE_SIZE) || defined(SKEIN_PERF)
/* rough code-size metric: distance between adjacent function addresses */
461 size_t skein_256_process_block_code_size(void)
463 return ((u8 *) skein_256_process_block_code_size) -
464 ((u8 *) skein_256_process_block);
/* report the compile-time unroll factor chosen above */
466 unsigned int skein_256_unroll_cnt(void)
468 return SKEIN_UNROLL_256;
473 /***************************** SKEIN_512 ******************************/
474 #if !(SKEIN_USE_ASM & 512)
/*
 * skein_512_process_block() - run blk_cnt 64-byte blocks through the
 * Threefish-512 based compression function, updating the chaining
 * variables (ctx->x[]) and the tweak (ctx->h.tweak[]).
 * NOTE(review): this view is missing lines (braces, ks[0..7] setup,
 * the do/while over blk_cnt, most of the first key injection, and the
 * round-group bodies); comments describe only the visible statements.
 */
475 void skein_512_process_block(struct skein_512_ctx *ctx, const u8 *blk_ptr,
476 size_t blk_cnt, size_t byte_cnt_add)
479 WCNT = SKEIN_512_STATE_WORDS
483 u64 kw[WCNT+4+RCNT*2]; /* key sched: chaining vars + tweak + "rot"*/
485 u64 kw[WCNT+4]; /* key schedule words : chaining vars + tweak */
487 u64 X0, X1, X2, X3, X4, X5, X6, X7; /* local copies, for speed */
488 u64 w[WCNT]; /* local copy of input block */
490 const u64 *X_ptr[8]; /* use for debugging (help cc put Xn in regs) */
502 skein_assert(blk_cnt != 0); /* never call with blk_cnt == 0! */
/* load tweak words from the context (ts aliases kw[KW_TWK_BASE]) */
503 ts[0] = ctx->h.tweak[0];
504 ts[1] = ctx->h.tweak[1];
507 * this implementation only supports 2**64 input bytes
508 * (no carry out here)
510 ts[0] += byte_cnt_add; /* update processed length */
512 /* precompute the key schedule for this block */
/* parity word: xor of all key words with the Threefish key-parity const */
521 ks[8] = ks[0] ^ ks[1] ^ ks[2] ^ ks[3] ^
522 ks[4] ^ ks[5] ^ ks[6] ^ ks[7] ^ SKEIN_KS_PARITY;
524 ts[2] = ts[0] ^ ts[1]; /* third tweak word is the xor of the first two */
526 /* get input block in little-endian format */
527 skein_get64_lsb_first(w, blk_ptr, WCNT);
528 debug_save_tweak(ctx);
530 /* do the first full key injection */
536 X5 = w[5] + ks[5] + ts[0];
537 X6 = w[6] + ks[6] + ts[1];
540 blk_ptr += SKEIN_512_BLOCK_BYTES; /* advance to the next input block */
/* loop bounds/step differ between the looped and unrolled builds */
544 r < (SKEIN_UNROLL_512 ? 2 * RCNT : 2);
545 r += (SKEIN_UNROLL_512 ? 2 * SKEIN_UNROLL_512 : 1)) {
/* 8-round groups 10..14 are compiled in only if the unroll needs them */
576 #if R512_UNROLL_R(10)
579 #if R512_UNROLL_R(11)
582 #if R512_UNROLL_R(12)
585 #if R512_UNROLL_R(13)
588 #if R512_UNROLL_R(14)
593 /* do the final "feedforward" xor, update context chaining */
594 ctx->x[0] = X0 ^ w[0];
595 ctx->x[1] = X1 ^ w[1];
596 ctx->x[2] = X2 ^ w[2];
597 ctx->x[3] = X3 ^ w[3];
598 ctx->x[4] = X4 ^ w[4];
599 ctx->x[5] = X5 ^ w[5];
600 ctx->x[6] = X6 ^ w[6];
601 ctx->x[7] = X7 ^ w[7];
/* FIRST flag applies only to the first block of a message */
603 ts[1] &= ~SKEIN_T1_FLAG_FIRST;
/* write the updated tweak back into the context */
605 ctx->h.tweak[0] = ts[0];
606 ctx->h.tweak[1] = ts[1];
609 #if defined(SKEIN_CODE_SIZE) || defined(SKEIN_PERF)
/* rough code-size metric: distance between adjacent function addresses */
610 size_t skein_512_process_block_code_size(void)
612 return ((u8 *) skein_512_process_block_code_size) -
613 ((u8 *) skein_512_process_block);
/* report the compile-time unroll factor chosen above */
615 unsigned int skein_512_unroll_cnt(void)
617 return SKEIN_UNROLL_512;
622 /***************************** SKEIN_1024 ******************************/
623 #if !(SKEIN_USE_ASM & 1024)
/*
 * skein_1024_process_block() - run blk_cnt 128-byte blocks through the
 * Threefish-1024 based compression function, updating the chaining
 * variables (ctx->x[]) and the tweak (ctx->h.tweak[]).
 * NOTE(review): this view is missing lines (the ks[0..15] setup, the
 * do/while over blk_cnt, part of the first key injection, and the
 * round-group bodies); comments describe only the visible statements.
 */
624 void skein_1024_process_block(struct skein_1024_ctx *ctx, const u8 *blk_ptr,
625 size_t blk_cnt, size_t byte_cnt_add)
626 { /* do it in C, always looping (unrolled is bigger AND slower!) */
628 WCNT = SKEIN_1024_STATE_WORDS
631 #if (SKEIN_UNROLL_1024 != 0)
632 u64 kw[WCNT+4+RCNT*2]; /* key sched: chaining vars + tweak + "rot" */
634 u64 kw[WCNT+4]; /* key schedule words : chaining vars + tweak */
637 /* local copy of vars, for speed */
638 u64 X00, X01, X02, X03, X04, X05, X06, X07,
639 X08, X09, X10, X11, X12, X13, X14, X15;
640 u64 w[WCNT]; /* local copy of input block */
642 skein_assert(blk_cnt != 0); /* never call with blk_cnt == 0! */
/* load tweak words from the context (ts aliases kw[KW_TWK_BASE]) */
643 ts[0] = ctx->h.tweak[0];
644 ts[1] = ctx->h.tweak[1];
647 * this implementation only supports 2**64 input bytes
648 * (no carry out here)
650 ts[0] += byte_cnt_add; /* update processed length */
652 /* precompute the key schedule for this block */
/* parity word: xor of all key words with the Threefish key-parity const */
669 ks[16] = ks[0] ^ ks[1] ^ ks[2] ^ ks[3] ^
670 ks[4] ^ ks[5] ^ ks[6] ^ ks[7] ^
671 ks[8] ^ ks[9] ^ ks[10] ^ ks[11] ^
672 ks[12] ^ ks[13] ^ ks[14] ^ ks[15] ^ SKEIN_KS_PARITY;
674 ts[2] = ts[0] ^ ts[1]; /* third tweak word is the xor of the first two */
676 /* get input block in little-endian format */
677 skein_get64_lsb_first(w, blk_ptr, WCNT);
678 debug_save_tweak(ctx);
680 /* do the first full key injection */
691 X10 = w[10] + ks[10];
692 X11 = w[11] + ks[11];
693 X12 = w[12] + ks[12];
694 X13 = w[13] + ks[13] + ts[0];
695 X14 = w[14] + ks[14] + ts[1];
696 X15 = w[15] + ks[15];
/* loop bounds/step differ between the looped and unrolled builds */
699 r < (SKEIN_UNROLL_1024 ? 2 * RCNT : 2);
700 r += (SKEIN_UNROLL_1024 ? 2 * SKEIN_UNROLL_1024 : 1)) {
/* 8-round groups 1..14 are compiled in only if the unroll needs them */
702 #if R1024_UNROLL_R(1)
705 #if R1024_UNROLL_R(2)
708 #if R1024_UNROLL_R(3)
711 #if R1024_UNROLL_R(4)
714 #if R1024_UNROLL_R(5)
717 #if R1024_UNROLL_R(6)
720 #if R1024_UNROLL_R(7)
723 #if R1024_UNROLL_R(8)
726 #if R1024_UNROLL_R(9)
729 #if R1024_UNROLL_R(10)
732 #if R1024_UNROLL_R(11)
735 #if R1024_UNROLL_R(12)
738 #if R1024_UNROLL_R(13)
741 #if R1024_UNROLL_R(14)
745 /* do the final "feedforward" xor, update context chaining */
747 ctx->x[0] = X00 ^ w[0];
748 ctx->x[1] = X01 ^ w[1];
749 ctx->x[2] = X02 ^ w[2];
750 ctx->x[3] = X03 ^ w[3];
751 ctx->x[4] = X04 ^ w[4];
752 ctx->x[5] = X05 ^ w[5];
753 ctx->x[6] = X06 ^ w[6];
754 ctx->x[7] = X07 ^ w[7];
755 ctx->x[8] = X08 ^ w[8];
756 ctx->x[9] = X09 ^ w[9];
757 ctx->x[10] = X10 ^ w[10];
758 ctx->x[11] = X11 ^ w[11];
759 ctx->x[12] = X12 ^ w[12];
760 ctx->x[13] = X13 ^ w[13];
761 ctx->x[14] = X14 ^ w[14];
762 ctx->x[15] = X15 ^ w[15];
/* FIRST flag applies only to the first block of a message */
764 ts[1] &= ~SKEIN_T1_FLAG_FIRST;
765 blk_ptr += SKEIN_1024_BLOCK_BYTES; /* advance to the next input block */
/* write the updated tweak back into the context */
767 ctx->h.tweak[0] = ts[0];
768 ctx->h.tweak[1] = ts[1];
771 #if defined(SKEIN_CODE_SIZE) || defined(SKEIN_PERF)
/* rough code-size metric: distance between adjacent function addresses */
772 size_t skein_1024_process_block_code_size(void)
774 return ((u8 *) skein_1024_process_block_code_size) -
775 ((u8 *) skein_1024_process_block);
/* report the compile-time unroll factor chosen above */
777 unsigned int skein_1024_unroll_cnt(void)
779 return SKEIN_UNROLL_1024;