/* U1memcpy.S: UltraSPARC-I/II/IIi/IIe optimized memcpy.
 *
 * Copyright (C) 1997, 2004 David S. Miller (davem@redhat.com)
 * Copyright (C) 1996, 1997, 1998, 1999 Jakub Jelinek (jj@ultra.linux.cz)
 */
#ifdef __KERNEL__
#include <linux/linkage.h>
#include <asm/visasm.h>
#include <asm/asi.h>
#define GLOBAL_SPARE	g7
#else
#define GLOBAL_SPARE	g5
#define ASI_BLK_P 0xf0
#define FPRS_FEF  0x04
#ifdef MEMCPY_DEBUG
#define VISEntry rd %fprs, %o5; wr %g0, FPRS_FEF, %fprs; \
		 clr %g1; clr %g2; clr %g3; subcc %g0, %g0, %g0;
#define VISExit and %o5, FPRS_FEF, %o5; wr %o5, 0x0, %fprs
#else
#define VISEntry rd %fprs, %o5; wr %g0, FPRS_FEF, %fprs
#define VISExit and %o5, FPRS_FEF, %o5; wr %o5, 0x0, %fprs
#endif
#endif
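
/* VISEntry saves the current %fprs in %o5 and sets FPRS_FEF so the
 * FP/VIS register file may be used; VISExit restores the saved state.
 * The MEMCPY_DEBUG variant also clears the scratch registers this
 * routine clobbers, so stale values are easier to spot.
 */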
#ifndef EX_LD
#define EX_LD(x,y)	x
#endif
#ifndef EX_LD_FP
#define EX_LD_FP(x,y)	x
#endif

#ifndef EX_ST
#define EX_ST(x,y)	x
#endif
#ifndef EX_ST_FP
#define EX_ST_FP(x,y)	x
#endif
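
/* The EX_* wrappers mark a load or store for fault handling: the
 * first argument is the instruction, the second the fixup entry to
 * run if the access faults.  Built standalone (plain memcpy) they
 * expand to just the instruction; the user-copy wrappers that
 * include this file override them to emit exception-table entries.
 */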
#ifndef LOAD
#define LOAD(type,addr,dest)	type [addr], dest
#endif

#ifndef LOAD_BLK
#define LOAD_BLK(addr,dest)	ldda [addr] ASI_BLK_P, dest
#endif

#ifndef STORE
#define STORE(type,src,addr)	type src, [addr]
#endif

#ifndef STORE_BLK
#define STORE_BLK(src,addr)	stda src, [addr] ASI_BLK_P
#endif
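
/* ldda/stda with ASI_BLK_P move a full 64-byte block between memory
 * and a group of eight double-precision FP registers in one shot.
 * Block addresses must be 64-byte aligned, which is why the code
 * below works so hard to align the destination first.
 */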
#ifndef FUNC_NAME
#define FUNC_NAME	memcpy
#endif
#define FREG_FROB(f1, f2, f3, f4, f5, f6, f7, f8, f9)	\
	faligndata		%f1, %f2, %f48;		\
	faligndata		%f2, %f3, %f50;		\
	faligndata		%f3, %f4, %f52;		\
	faligndata		%f4, %f5, %f54;		\
	faligndata		%f5, %f6, %f56;		\
	faligndata		%f6, %f7, %f58;		\
	faligndata		%f7, %f8, %f60;		\
	faligndata		%f8, %f9, %f62;
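
/* FREG_FROB turns 72 bytes of unaligned source data (nine doubles)
 * into 64 bytes of destination-aligned data in %f48-%f62.  Each
 * faligndata extracts eight aligned bytes from a pair of adjacent
 * source doubles, using the byte offset latched in %gsr by the
 * earlier alignaddr.
 */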
#define MAIN_LOOP_CHUNK(src, dest, fdest, fsrc, jmptgt)	\
	EX_LD_FP(LOAD_BLK(%src, %fdest), U1_gs_80_fp);	\
	EX_ST_FP(STORE_BLK(%fsrc, %dest), U1_gs_80_fp);	\
	add			%src, 0x40, %src;	\
	subcc			%GLOBAL_SPARE, 0x40, %GLOBAL_SPARE; \
	be,pn			%xcc, jmptgt;		\
	 add			%dest, 0x40, %dest;	\
#define LOOP_CHUNK1(src, dest, branch_dest)		\
	MAIN_LOOP_CHUNK(src, dest, f0,  f48, branch_dest)
#define LOOP_CHUNK2(src, dest, branch_dest)		\
	MAIN_LOOP_CHUNK(src, dest, f16, f48, branch_dest)
#define LOOP_CHUNK3(src, dest, branch_dest)		\
	MAIN_LOOP_CHUNK(src, dest, f32, f48, branch_dest)
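
/* One MAIN_LOOP_CHUNK issues the next 64-byte block load and the
 * block store of already-aligned data, then leaves the loop via
 * jmptgt once the byte count in %GLOBAL_SPARE is exhausted.  The
 * three LOOP_CHUNK variants rotate through the %f0, %f16 and %f32
 * register groups so loads, faligndata and stores of different
 * blocks can overlap.
 */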
#define DO_SYNC			membar	#Sync;
#define STORE_SYNC(dest, fsrc)				\
	EX_ST_FP(STORE_BLK(%fsrc, %dest), U1_gs_80_fp);	\
	add			%dest, 0x40, %dest;	\
	DO_SYNC

#define STORE_JUMP(dest, fsrc, target)			\
	EX_ST_FP(STORE_BLK(%fsrc, %dest), U1_gs_40_fp);	\
	add			%dest, 0x40, %dest;	\
	ba,pt			%xcc, target;		\
	 nop;
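
/* STORE_SYNC issues a block store and then a membar #Sync, since
 * block stores complete asynchronously with respect to the FP
 * register file on these CPUs.  STORE_JUMP is the loop-exit flavor:
 * it branches to the 40f-63f drain code instead of synchronizing.
 */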
#define FINISH_VISCHUNK(dest, f0, f1)			\
	subcc			%g3, 8, %g3;		\
	bl,pn			%xcc, 95f;		\
	 faligndata		%f0, %f1, %f48;		\
	EX_ST_FP(STORE(std, %f48, %dest), U1_g3_8_fp);	\
	add			%dest, 8, %dest;

#define UNEVEN_VISCHUNK_LAST(dest, f0, f1)	\
	subcc			%g3, 8, %g3;	\
	bl,pn			%xcc, 95f;	\
	 fsrc2			%f0, %f1;

#define UNEVEN_VISCHUNK(dest, f0, f1)		\
	UNEVEN_VISCHUNK_LAST(dest, f0, f1)	\
	ba,a,pt			%xcc, 93f;
	.register	%g2,#scratch
	.register	%g3,#scratch

	.text
#ifndef EX_RETVAL
#define EX_RETVAL(x)	x
#endif
	ENTRY(U1_gs_0_fp)
	VISExitHalf
	add		%GLOBAL_SPARE, %g3, %o0
	retl
	 add		%o0, %o2, %o0
	ENDPROC(U1_gs_0_fp)
	ENTRY(U1_gs_80_fp)
	VISExitHalf
	add		%GLOBAL_SPARE, 0x80, %GLOBAL_SPARE
	add		%GLOBAL_SPARE, %g3, %o0
	retl
	 add		%o0, %o2, %o0
	ENDPROC(U1_gs_80_fp)
	ENTRY(U1_gs_40_fp)
	VISExitHalf
	add		%GLOBAL_SPARE, 0x40, %GLOBAL_SPARE
	add		%GLOBAL_SPARE, %g3, %o0
	retl
	 add		%o0, %o2, %o0
	ENDPROC(U1_gs_40_fp)
	ENTRY(U1_gs_0)
	retl
	 add		%GLOBAL_SPARE, %o2, %o0
	ENDPROC(U1_gs_0)
	ENTRY(U1_gs_8)
	add		%GLOBAL_SPARE, %o2, %GLOBAL_SPARE
	retl
	 add		%GLOBAL_SPARE, 0x8, %o0
	ENDPROC(U1_gs_8)
	ENTRY(U1_gs_10)
	add		%GLOBAL_SPARE, %o2, %GLOBAL_SPARE
	retl
	 add		%GLOBAL_SPARE, 0x10, %o0
	ENDPROC(U1_gs_10)
	ENTRY(U1_gs_0_o2_adj)
	and		%o2, 7, %o2
	retl
	 add		%GLOBAL_SPARE, %o2, %o0
	ENDPROC(U1_gs_0_o2_adj)
	ENTRY(U1_gs_8_o2_adj)
	and		%o2, 7, %o2
	add		%GLOBAL_SPARE, 8, %GLOBAL_SPARE
	retl
	 add		%GLOBAL_SPARE, %o2, %o0
	ENDPROC(U1_gs_8_o2_adj)
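
	/* The fixup entry points follow one naming scheme:
	 * U1_<register>_<adjustment>[_fp].  Each is reached when the
	 * memory access tagged with it faults, and computes into %o0
	 * the number of bytes NOT copied (named register plus
	 * adjustment plus the residue in %o2); the _fp variants leave
	 * VIS state via VISExitHalf first.  This lets the user-copy
	 * wrappers that reuse this body report a precise remainder.
	 */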
	.align		64

	.globl		FUNC_NAME
	.type		FUNC_NAME,#function
FUNC_NAME:	/* %o0=dst, %o1=src, %o2=len */
	/* Clobbers o5/g1/g2/g3/g7/icc/xcc.  */
	VISEntry
	/* Is 'dst' already aligned on a 64-byte boundary? */
	andcc		%o0, 0x3f, %g2
	be,pt		%XCC, 2f
	/* Compute abs((dst & 0x3f) - 0x40) into %g2.  This is the number
	 * of bytes to copy to make 'dst' 64-byte aligned.  We pre-
	 * subtract this from 'len'.
	 */
	sub		%o0, %o1, %GLOBAL_SPARE
	sub		%g2, 0x40, %g2
	sub		%g0, %g2, %g2
	sub		%o2, %g2, %o2
	andcc		%g2, 0x7, %g1
	be,pt		%icc, 2f
	 and		%g2, 0x38, %g2
1:	subcc		%g1, 0x1, %g1
	EX_LD_FP(LOAD(ldub, %o1 + 0x00, %o3), U1_g1_1_fp)
	EX_ST_FP(STORE(stb, %o3, %o1 + %GLOBAL_SPARE), U1_g1_1_fp)
	bgu,pt		%XCC, 1b
	 add		%o1, 0x1, %o1
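
	/* Note the addressing trick: %GLOBAL_SPARE holds (dst - src),
	 * so a store to %o1 + %GLOBAL_SPARE hits the destination and
	 * only one pointer needs to be advanced in the loop.
	 */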
	add		%o1, %GLOBAL_SPARE, %o0

2:	cmp		%g2, 0x0
	and		%o1, 0x7, %g1
	be,pt		%icc, 3f
	 alignaddr	%o1, %g0, %o1

	EX_LD_FP(LOAD(ldd, %o1, %f4), U1_g2_0_fp)
1:	EX_LD_FP(LOAD(ldd, %o1 + 0x8, %f6), U1_g2_0_fp)
	add		%o1, 0x8, %o1
	subcc		%g2, 0x8, %g2
	faligndata	%f4, %f6, %f0
	EX_ST_FP(STORE(std, %f0, %o0), U1_g2_8_fp)
	be,pn		%icc, 3f
	 add		%o0, 0x8, %o0

	EX_LD_FP(LOAD(ldd, %o1 + 0x8, %f4), U1_g2_0_fp)
	add		%o1, 0x8, %o1
	subcc		%g2, 0x8, %g2
	faligndata	%f6, %f4, %f0
	EX_ST_FP(STORE(std, %f0, %o0), U1_g2_8_fp)
	bne,pt		%icc, 1b
	 add		%o0, 0x8, %o0
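
	/* alignaddr rounded %o1 down to an 8-byte boundary and latched
	 * the low address bits in %gsr.  The loop above ping-pongs
	 * between %f4 and %f6 so each faligndata sees the previous and
	 * current source doubles, emitting one aligned double per
	 * iteration until dst reaches a 64-byte boundary.
	 */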
	/* Destination is 64-byte aligned.  */
3:	membar		  #LoadStore | #StoreStore | #StoreLoad

	subcc		%o2, 0x40, %GLOBAL_SPARE
	add		%o1, %g1, %g1
	andncc		%GLOBAL_SPARE, (0x40 - 1), %GLOBAL_SPARE
	srl		%g1, 3, %g2
	sub		%o2, %GLOBAL_SPARE, %g3
	andn		%o1, (0x40 - 1), %o1
	and		%g2, 7, %g2
	andncc		%g3, 0x7, %g3
	sub		%o2, %GLOBAL_SPARE, %o2
	add		%g1, %GLOBAL_SPARE, %g1
	EX_LD_FP(LOAD_BLK(%o1, %f0), U1_gs_0_fp)
	add		%o1, 0x40, %o1
	EX_LD_FP(LOAD_BLK(%o1, %f16), U1_gs_0_fp)
	add		%o1, 0x40, %o1
	sub		%GLOBAL_SPARE, 0x80, %GLOBAL_SPARE
	EX_LD_FP(LOAD_BLK(%o1, %f32), U1_gs_80_fp)
	add		%o1, 0x40, %o1
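
	/* Three 64-byte blocks are loaded into the %f0, %f16 and %f32
	 * register groups before the main loop starts, priming the
	 * software pipeline: while one group is being stored, the next
	 * is being aligned and a third is being loaded.
	 */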
	/* There are 8 instances of the unrolled loop,
	 * one for each possible alignment of the
	 * source buffer.  Each loop instance is 452
	 * bytes.
	 */
	add		%o3, %lo(1f - 1b), %o3
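
	/* Computed jump: the doubleword alignment of the source (0-7,
	 * in %g2) selects which of the eight instances below to run,
	 * at a byte offset of %g2 * 452 from the first instance.
	 */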
1:	FREG_FROB(f0, f2, f4, f6, f8, f10,f12,f14,f16)
	LOOP_CHUNK1(o1, o0, 1f)
	FREG_FROB(f16,f18,f20,f22,f24,f26,f28,f30,f32)
	LOOP_CHUNK2(o1, o0, 2f)
	FREG_FROB(f32,f34,f36,f38,f40,f42,f44,f46,f0)
	LOOP_CHUNK3(o1, o0, 3f)
	ba,pt		%xcc, 1b+4
	 faligndata	%f0, %f2, %f48
1:	FREG_FROB(f16,f18,f20,f22,f24,f26,f28,f30,f32)
	STORE_SYNC(o0, f48)
	FREG_FROB(f32,f34,f36,f38,f40,f42,f44,f46,f0)
	STORE_JUMP(o0, f48, 40f)
2:	FREG_FROB(f32,f34,f36,f38,f40,f42,f44,f46,f0)
	STORE_SYNC(o0, f48)
	FREG_FROB(f0, f2, f4, f6, f8, f10,f12,f14,f16)
	STORE_JUMP(o0, f48, 48f)
3:	FREG_FROB(f0, f2, f4, f6, f8, f10,f12,f14,f16)
	STORE_SYNC(o0, f48)
	FREG_FROB(f16,f18,f20,f22,f24,f26,f28,f30,f32)
	STORE_JUMP(o0, f48, 56f)
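
	/* Each instance works the same way: the three FREG_FROB/
	 * LOOP_CHUNK pairs form the steady-state loop, re-entered by
	 * ba,pt 1b+4 (its delay-slot faligndata already performed the
	 * first instruction of the FREG_FROB at 1b).  When a
	 * LOOP_CHUNKn exits to 1f/2f/3f, the remaining two pipelined
	 * blocks are aligned and stored, then control jumps to the
	 * per-register drain code at 40f-63f.
	 */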
1:	FREG_FROB(f2, f4, f6, f8, f10,f12,f14,f16,f18)
	LOOP_CHUNK1(o1, o0, 1f)
	FREG_FROB(f18,f20,f22,f24,f26,f28,f30,f32,f34)
	LOOP_CHUNK2(o1, o0, 2f)
	FREG_FROB(f34,f36,f38,f40,f42,f44,f46,f0, f2)
	LOOP_CHUNK3(o1, o0, 3f)
	ba,pt		%xcc, 1b+4
	 faligndata	%f2, %f4, %f48
1:	FREG_FROB(f18,f20,f22,f24,f26,f28,f30,f32,f34)
	STORE_SYNC(o0, f48)
	FREG_FROB(f34,f36,f38,f40,f42,f44,f46,f0, f2)
	STORE_JUMP(o0, f48, 41f)
2:	FREG_FROB(f34,f36,f38,f40,f42,f44,f46,f0, f2)
	STORE_SYNC(o0, f48)
	FREG_FROB(f2, f4, f6, f8, f10,f12,f14,f16,f18)
	STORE_JUMP(o0, f48, 49f)
3:	FREG_FROB(f2, f4, f6, f8, f10,f12,f14,f16,f18)
	STORE_SYNC(o0, f48)
	FREG_FROB(f18,f20,f22,f24,f26,f28,f30,f32,f34)
	STORE_JUMP(o0, f48, 57f)
1:	FREG_FROB(f4, f6, f8, f10,f12,f14,f16,f18,f20)
	LOOP_CHUNK1(o1, o0, 1f)
	FREG_FROB(f20,f22,f24,f26,f28,f30,f32,f34,f36)
	LOOP_CHUNK2(o1, o0, 2f)
	FREG_FROB(f36,f38,f40,f42,f44,f46,f0, f2, f4)
	LOOP_CHUNK3(o1, o0, 3f)
	ba,pt		%xcc, 1b+4
	 faligndata	%f4, %f6, %f48
1:	FREG_FROB(f20,f22,f24,f26,f28,f30,f32,f34,f36)
	STORE_SYNC(o0, f48)
	FREG_FROB(f36,f38,f40,f42,f44,f46,f0, f2, f4)
	STORE_JUMP(o0, f48, 42f)
2:	FREG_FROB(f36,f38,f40,f42,f44,f46,f0, f2, f4)
	STORE_SYNC(o0, f48)
	FREG_FROB(f4, f6, f8, f10,f12,f14,f16,f18,f20)
	STORE_JUMP(o0, f48, 50f)
3:	FREG_FROB(f4, f6, f8, f10,f12,f14,f16,f18,f20)
	STORE_SYNC(o0, f48)
	FREG_FROB(f20,f22,f24,f26,f28,f30,f32,f34,f36)
	STORE_JUMP(o0, f48, 58f)
1:	FREG_FROB(f6, f8, f10,f12,f14,f16,f18,f20,f22)
	LOOP_CHUNK1(o1, o0, 1f)
	FREG_FROB(f22,f24,f26,f28,f30,f32,f34,f36,f38)
	LOOP_CHUNK2(o1, o0, 2f)
	FREG_FROB(f38,f40,f42,f44,f46,f0, f2, f4, f6)
	LOOP_CHUNK3(o1, o0, 3f)
	ba,pt		%xcc, 1b+4
	 faligndata	%f6, %f8, %f48
1:	FREG_FROB(f22,f24,f26,f28,f30,f32,f34,f36,f38)
	STORE_SYNC(o0, f48)
	FREG_FROB(f38,f40,f42,f44,f46,f0, f2, f4, f6)
	STORE_JUMP(o0, f48, 43f)
2:	FREG_FROB(f38,f40,f42,f44,f46,f0, f2, f4, f6)
	STORE_SYNC(o0, f48)
	FREG_FROB(f6, f8, f10,f12,f14,f16,f18,f20,f22)
	STORE_JUMP(o0, f48, 51f)
3:	FREG_FROB(f6, f8, f10,f12,f14,f16,f18,f20,f22)
	STORE_SYNC(o0, f48)
	FREG_FROB(f22,f24,f26,f28,f30,f32,f34,f36,f38)
	STORE_JUMP(o0, f48, 59f)
1:	FREG_FROB(f8, f10,f12,f14,f16,f18,f20,f22,f24)
	LOOP_CHUNK1(o1, o0, 1f)
	FREG_FROB(f24,f26,f28,f30,f32,f34,f36,f38,f40)
	LOOP_CHUNK2(o1, o0, 2f)
	FREG_FROB(f40,f42,f44,f46,f0, f2, f4, f6, f8)
	LOOP_CHUNK3(o1, o0, 3f)
	ba,pt		%xcc, 1b+4
	 faligndata	%f8, %f10, %f48
1:	FREG_FROB(f24,f26,f28,f30,f32,f34,f36,f38,f40)
	STORE_SYNC(o0, f48)
	FREG_FROB(f40,f42,f44,f46,f0, f2, f4, f6, f8)
	STORE_JUMP(o0, f48, 44f)
2:	FREG_FROB(f40,f42,f44,f46,f0, f2, f4, f6, f8)
	STORE_SYNC(o0, f48)
	FREG_FROB(f8, f10,f12,f14,f16,f18,f20,f22,f24)
	STORE_JUMP(o0, f48, 52f)
3:	FREG_FROB(f8, f10,f12,f14,f16,f18,f20,f22,f24)
	STORE_SYNC(o0, f48)
	FREG_FROB(f24,f26,f28,f30,f32,f34,f36,f38,f40)
	STORE_JUMP(o0, f48, 60f)
1:	FREG_FROB(f10,f12,f14,f16,f18,f20,f22,f24,f26)
	LOOP_CHUNK1(o1, o0, 1f)
	FREG_FROB(f26,f28,f30,f32,f34,f36,f38,f40,f42)
	LOOP_CHUNK2(o1, o0, 2f)
	FREG_FROB(f42,f44,f46,f0, f2, f4, f6, f8, f10)
	LOOP_CHUNK3(o1, o0, 3f)
	ba,pt		%xcc, 1b+4
	 faligndata	%f10, %f12, %f48
1:	FREG_FROB(f26,f28,f30,f32,f34,f36,f38,f40,f42)
	STORE_SYNC(o0, f48)
	FREG_FROB(f42,f44,f46,f0, f2, f4, f6, f8, f10)
	STORE_JUMP(o0, f48, 45f)
2:	FREG_FROB(f42,f44,f46,f0, f2, f4, f6, f8, f10)
	STORE_SYNC(o0, f48)
	FREG_FROB(f10,f12,f14,f16,f18,f20,f22,f24,f26)
	STORE_JUMP(o0, f48, 53f)
3:	FREG_FROB(f10,f12,f14,f16,f18,f20,f22,f24,f26)
	STORE_SYNC(o0, f48)
	FREG_FROB(f26,f28,f30,f32,f34,f36,f38,f40,f42)
	STORE_JUMP(o0, f48, 61f)
1:	FREG_FROB(f12,f14,f16,f18,f20,f22,f24,f26,f28)
	LOOP_CHUNK1(o1, o0, 1f)
	FREG_FROB(f28,f30,f32,f34,f36,f38,f40,f42,f44)
	LOOP_CHUNK2(o1, o0, 2f)
	FREG_FROB(f44,f46,f0, f2, f4, f6, f8, f10,f12)
	LOOP_CHUNK3(o1, o0, 3f)
	ba,pt		%xcc, 1b+4
	 faligndata	%f12, %f14, %f48
1:	FREG_FROB(f28,f30,f32,f34,f36,f38,f40,f42,f44)
	STORE_SYNC(o0, f48)
	FREG_FROB(f44,f46,f0, f2, f4, f6, f8, f10,f12)
	STORE_JUMP(o0, f48, 46f)
2:	FREG_FROB(f44,f46,f0, f2, f4, f6, f8, f10,f12)
	STORE_SYNC(o0, f48)
	FREG_FROB(f12,f14,f16,f18,f20,f22,f24,f26,f28)
	STORE_JUMP(o0, f48, 54f)
3:	FREG_FROB(f12,f14,f16,f18,f20,f22,f24,f26,f28)
	STORE_SYNC(o0, f48)
	FREG_FROB(f28,f30,f32,f34,f36,f38,f40,f42,f44)
	STORE_JUMP(o0, f48, 62f)
1:	FREG_FROB(f14,f16,f18,f20,f22,f24,f26,f28,f30)
	LOOP_CHUNK1(o1, o0, 1f)
	FREG_FROB(f30,f32,f34,f36,f38,f40,f42,f44,f46)
	LOOP_CHUNK2(o1, o0, 2f)
	FREG_FROB(f46,f0, f2, f4, f6, f8, f10,f12,f14)
	LOOP_CHUNK3(o1, o0, 3f)
	ba,pt		%xcc, 1b+4
	 faligndata	%f14, %f16, %f48
1:	FREG_FROB(f30,f32,f34,f36,f38,f40,f42,f44,f46)
	STORE_SYNC(o0, f48)
	FREG_FROB(f46,f0, f2, f4, f6, f8, f10,f12,f14)
	STORE_JUMP(o0, f48, 47f)
2:	FREG_FROB(f46,f0, f2, f4, f6, f8, f10,f12,f14)
	STORE_SYNC(o0, f48)
	FREG_FROB(f14,f16,f18,f20,f22,f24,f26,f28,f30)
	STORE_JUMP(o0, f48, 55f)
3:	FREG_FROB(f14,f16,f18,f20,f22,f24,f26,f28,f30)
	STORE_SYNC(o0, f48)
	FREG_FROB(f30,f32,f34,f36,f38,f40,f42,f44,f46)
	STORE_JUMP(o0, f48, 63f)
40:	FINISH_VISCHUNK(o0, f0, f2)
41:	FINISH_VISCHUNK(o0, f2, f4)
42:	FINISH_VISCHUNK(o0, f4, f6)
43:	FINISH_VISCHUNK(o0, f6, f8)
44:	FINISH_VISCHUNK(o0, f8, f10)
45:	FINISH_VISCHUNK(o0, f10, f12)
46:	FINISH_VISCHUNK(o0, f12, f14)
47:	UNEVEN_VISCHUNK(o0, f14, f0)
48:	FINISH_VISCHUNK(o0, f16, f18)
49:	FINISH_VISCHUNK(o0, f18, f20)
50:	FINISH_VISCHUNK(o0, f20, f22)
51:	FINISH_VISCHUNK(o0, f22, f24)
52:	FINISH_VISCHUNK(o0, f24, f26)
53:	FINISH_VISCHUNK(o0, f26, f28)
54:	FINISH_VISCHUNK(o0, f28, f30)
55:	UNEVEN_VISCHUNK(o0, f30, f0)
56:	FINISH_VISCHUNK(o0, f32, f34)
57:	FINISH_VISCHUNK(o0, f34, f36)
58:	FINISH_VISCHUNK(o0, f36, f38)
59:	FINISH_VISCHUNK(o0, f38, f40)
60:	FINISH_VISCHUNK(o0, f40, f42)
61:	FINISH_VISCHUNK(o0, f42, f44)
62:	FINISH_VISCHUNK(o0, f44, f46)
63:	UNEVEN_VISCHUNK_LAST(o0, f46, f0)
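
	/* The 40-63 tails drain whatever doublewords remain (count in
	 * %g3) from the register group the main loop stopped in.  The
	 * UNEVEN cases (47, 55, 63) hit when the ring of FP registers
	 * wraps: they move the last source double into %f0 with fsrc2
	 * and continue with the generic drain loop at 93f.
	 */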
93:	EX_LD_FP(LOAD(ldd, %o1, %f2), U1_g3_0_fp)
	add		%o1, 8, %o1
	subcc		%g3, 8, %g3
	faligndata	%f0, %f2, %f8
	EX_ST_FP(STORE(std, %f8, %o0), U1_g3_8_fp)
	bl,pn		%xcc, 95f
	 add		%o0, 8, %o0
	EX_LD_FP(LOAD(ldd, %o1, %f0), U1_g3_0_fp)
	add		%o1, 8, %o1
	subcc		%g3, 8, %g3
	faligndata	%f2, %f0, %f8
	EX_ST_FP(STORE(std, %f8, %o0), U1_g3_8_fp)
	bge,pt		%xcc, 93b
	 add		%o0, 8, %o0
95:	brz,pt		%o2, 2f
	 mov		%g1, %o1

1:	EX_LD_FP(LOAD(ldub, %o1, %o3), U1_o2_0_fp)
	add		%o1, 1, %o1
	subcc		%o2, 1, %o2
	EX_ST_FP(STORE(stb, %o3, %o0), U1_o2_1_fp)
	bne,pt		%xcc, 1b
	 add		%o0, 1, %o0

2:	membar		#StoreLoad | #StoreStore
	VISExit
	retl
	 mov		EX_RETVAL(%o4), %o0
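
	/* All return paths hand back the original dst, which was saved
	 * in %o4 at function entry; memcpy must return it.  EX_RETVAL
	 * is the identity for plain memcpy.
	 */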
70:	/* 16 < len <= (5 * 64) */
	bne,pn		%XCC, 75f
	 sub		%o0, %o1, %o3
72:	andn		%o2, 0xf, %GLOBAL_SPARE
	and		%o2, 0xf, %o2
1:	EX_LD(LOAD(ldx, %o1 + 0x00, %o5), U1_gs_0)
	EX_LD(LOAD(ldx, %o1 + 0x08, %g1), U1_gs_0)
	subcc		%GLOBAL_SPARE, 0x10, %GLOBAL_SPARE
	EX_ST(STORE(stx, %o5, %o1 + %o3), U1_gs_10)
	add		%o1, 0x8, %o1
	EX_ST(STORE(stx, %g1, %o1 + %o3), U1_gs_8)
	bgu,pt		%XCC, 1b
	 add		%o1, 0x8, %o1
73:	andcc		%o2, 0x8, %g0
	be,pt		%XCC, 1f
	 nop
	EX_LD(LOAD(ldx, %o1, %o5), U1_o2_0)
	sub		%o2, 0x8, %o2
	EX_ST(STORE(stx, %o5, %o1 + %o3), U1_o2_8)
	add		%o1, 0x8, %o1
1:	andcc		%o2, 0x4, %g0
	be,pt		%XCC, 1f
	 nop
	EX_LD(LOAD(lduw, %o1, %o5), U1_o2_0)
	sub		%o2, 0x4, %o2
	EX_ST(STORE(stw, %o5, %o1 + %o3), U1_o2_4)
	add		%o1, 0x4, %o1
75:	andcc		%o0, 0x7, %g1
	sub		%g1, 0x8, %g1
	be,pn		%icc, 1f
	 sub		%g0, %g1, %g1
	sub		%o2, %g1, %o2

1:	EX_LD(LOAD(ldub, %o1, %o5), U1_g1_0)
	subcc		%g1, 1, %g1
	EX_ST(STORE(stb, %o5, %o1 + %o3), U1_g1_1)
	bgu,pt		%icc, 1b
	 add		%o1, 1, %o1
	EX_LD(LOAD(ldx, %o1, %g2), U1_o2_0)
	sub		%o3, %g1, %o3
	andn		%o2, 0x7, %GLOBAL_SPARE
	sllx		%g2, %g1, %g2
1:	EX_LD(LOAD(ldx, %o1 + 0x8, %g3), U1_gs_0_o2_adj)
	subcc		%GLOBAL_SPARE, 0x8, %GLOBAL_SPARE
	add		%o1, 0x8, %o1
	srlx		%g3, %o3, %o5
	or		%o5, %g2, %o5
	sllx		%g3, %g1, %g2
	EX_ST(STORE(stx, %o5, %o0), U1_gs_8_o2_adj)
	bgu,pt		%icc, 1b
	 add		%o0, 0x8, %o0
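
	/* Integer shift-and-merge: when the source is not 8-byte
	 * aligned, each aligned load is split with sllx/srlx by the
	 * bit offset (%g1) and its complement (%o3), then OR-ed with
	 * the leftover bits of the previous double, so the destination
	 * is written with whole, aligned 8-byte stores.
	 */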
80:	/* 0 < len <= 16 */
	andcc		%o3, 0x3, %g0
	bne,pn		%XCC, 90f
	 sub		%o0, %o1, %o3

1:	EX_LD(LOAD(lduw, %o1, %g1), U1_o2_0)
	subcc		%o2, 4, %o2
	EX_ST(STORE(stw, %g1, %o1 + %o3), U1_o2_4)
	bgu,pt		%XCC, 1b
	 add		%o1, 4, %o1

85:	retl
	 mov		EX_RETVAL(%o4), %o0
	.align		32
90:	EX_LD(LOAD(ldub, %o1, %g1), U1_o2_0)
	subcc		%o2, 1, %o2
	EX_ST(STORE(stb, %g1, %o1 + %o3), U1_o2_1)
	bgu,pt		%XCC, 90b
	 add		%o1, 1, %o1
	retl
	 mov		EX_RETVAL(%o4), %o0

	.size		FUNC_NAME, .-FUNC_NAME