1 /* -*- Mode: c; c-basic-offset: 4; tab-width: 8; indent-tabs-mode: t; -*- */
3 * Copyright © 2000 SuSE, Inc.
4 * Copyright © 2007 Red Hat, Inc.
6 * Permission to use, copy, modify, distribute, and sell this software and its
7 * documentation for any purpose is hereby granted without fee, provided that
8 * the above copyright notice appear in all copies and that both that
9 * copyright notice and this permission notice appear in supporting
10 * documentation, and that the name of SuSE not be used in advertising or
11 * publicity pertaining to distribution of the software without specific,
12 * written prior permission. SuSE makes no representations about the
13 * suitability of this software for any purpose. It is provided "as is"
14 * without express or implied warranty.
16 * SuSE DISCLAIMS ALL WARRANTIES WITH REGARD TO THIS SOFTWARE, INCLUDING ALL
17 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS, IN NO EVENT SHALL SuSE
18 * BE LIABLE FOR ANY SPECIAL, INDIRECT OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
19 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION
20 * OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN
21 * CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
23 * Author: Keith Packard, SuSE, Inc.
 */
31 #include "pixman-private.h"
32 #include "pixman-combine32.h"
33 #include "pixman-inlines.h"
35 static force_inline uint32_t
38 if (((uintptr_t)a) & 1)
40 #ifdef WORDS_BIGENDIAN
41 return (*a << 16) | (*(uint16_t *)(a + 1));
43 return *a | (*(uint16_t *)(a + 1) << 8);
48 #ifdef WORDS_BIGENDIAN
49 return (*(uint16_t *)a << 8) | *(a + 2);
51 return *(uint16_t *)a | (*(a + 2) << 16);
56 static force_inline void
60 if (((uintptr_t)a) & 1)
62 #ifdef WORDS_BIGENDIAN
63 *a = (uint8_t) (v >> 16);
64 *(uint16_t *)(a + 1) = (uint16_t) (v);
67 *(uint16_t *)(a + 1) = (uint16_t) (v >> 8);
72 #ifdef WORDS_BIGENDIAN
73 *(uint16_t *)a = (uint16_t)(v >> 8);
74 *(a + 2) = (uint8_t)v;
76 *(uint16_t *)a = (uint16_t)v;
77 *(a + 2) = (uint8_t)(v >> 16);
82 static force_inline uint32_t
86 uint32_t a = ~src >> 24;
88 UN8x4_MUL_UN8_ADD_UN8x4 (dest, a, src);
93 static force_inline uint32_t
/*
 * OVER fast path: x888 source through an a8 mask onto an 8888 destination.
 * The source pixel is forced opaque (alpha := 0xff) before compositing.
 * NOTE(review): elided listing — loop headers/braces are missing from view.
 */
110 fast_composite_over_x888_8_8888 (pixman_implementation_t *imp,
111 pixman_composite_info_t *info)
113 PIXMAN_COMPOSITE_ARGS (info);
114 uint32_t *src, *src_line;
115 uint32_t *dst, *dst_line;
116 uint8_t *mask, *mask_line;
117 int src_stride, mask_stride, dst_stride;
122 PIXMAN_IMAGE_GET_LINE (dest_image, dest_x, dest_y, uint32_t, dst_stride, dst_line, 1);
123 PIXMAN_IMAGE_GET_LINE (mask_image, mask_x, mask_y, uint8_t, mask_stride, mask_line, 1);
124 PIXMAN_IMAGE_GET_LINE (src_image, src_x, src_y, uint32_t, src_stride, src_line, 1);
129 src_line += src_stride;
131 dst_line += dst_stride;
133 mask_line += mask_stride;
/* Force the x888 source opaque so it can be treated as a8r8g8b8. */
141 s = *src | 0xff000000;
150 *dst = over (d, *dst);
/*
 * IN fast path: solid source combined with an a8 mask into an a8 destination.
 * Appears to have two code paths (visible at MUL_UN8 uses): one for a fully
 * opaque solid and one that premultiplies the mask by the source alpha —
 * TODO confirm, the branch conditions are elided from this view.
 */
160 fast_composite_in_n_8_8 (pixman_implementation_t *imp,
161 pixman_composite_info_t *info)
163 PIXMAN_COMPOSITE_ARGS (info);
165 uint8_t *dst_line, *dst;
166 uint8_t *mask_line, *mask, m;
167 int dst_stride, mask_stride;
/* Resolve the solid source to the destination's format. */
171 src = _pixman_image_get_solid (imp, src_image, dest_image->bits.format);
175 PIXMAN_IMAGE_GET_LINE (dest_image, dest_x, dest_y, uint8_t, dst_stride, dst_line, 1);
176 PIXMAN_IMAGE_GET_LINE (mask_image, mask_x, mask_y, uint8_t, mask_stride, mask_line, 1);
183 dst_line += dst_stride;
185 mask_line += mask_stride;
195 *dst = MUL_UN8 (m, *dst, t);
206 dst_line += dst_stride;
208 mask_line += mask_stride;
/* Scale the mask by the solid's alpha before multiplying into dest. */
214 m = MUL_UN8 (m, srca, t);
219 *dst = MUL_UN8 (m, *dst, t);
/*
 * IN fast path: a8 source multiplied into an a8 destination, per pixel.
 * NOTE(review): elided listing — loop headers/braces are missing from view.
 */
228 fast_composite_in_8_8 (pixman_implementation_t *imp,
229 pixman_composite_info_t *info)
231 PIXMAN_COMPOSITE_ARGS (info);
232 uint8_t *dst_line, *dst;
233 uint8_t *src_line, *src;
234 int dst_stride, src_stride;
239 PIXMAN_IMAGE_GET_LINE (src_image, src_x, src_y, uint8_t, src_stride, src_line, 1);
240 PIXMAN_IMAGE_GET_LINE (dest_image, dest_x, dest_y, uint8_t, dst_stride, dst_line, 1);
245 dst_line += dst_stride;
247 src_line += src_stride;
257 *dst = MUL_UN8 (s, *dst, t);
/*
 * OVER fast path: solid source with an a8 mask onto an 8888 destination.
 * The two visible `over` calls suggest a fully-opaque-mask shortcut and a
 * general masked path — the branch conditions are elided from this view.
 */
265 fast_composite_over_n_8_8888 (pixman_implementation_t *imp,
266 pixman_composite_info_t *info)
268 PIXMAN_COMPOSITE_ARGS (info);
270 uint32_t *dst_line, *dst, d;
271 uint8_t *mask_line, *mask, m;
272 int dst_stride, mask_stride;
275 src = _pixman_image_get_solid (imp, src_image, dest_image->bits.format);
281 PIXMAN_IMAGE_GET_LINE (dest_image, dest_x, dest_y, uint32_t, dst_stride, dst_line, 1);
282 PIXMAN_IMAGE_GET_LINE (mask_image, mask_x, mask_y, uint8_t, mask_stride, mask_line, 1);
287 dst_line += dst_stride;
289 mask_line += mask_stride;
/* Mask fully opaque: composite the solid directly. */
300 *dst = over (src, *dst);
/* Otherwise composite the mask-attenuated source (in d). */
305 *dst = over (d, *dst);
/*
 * ADD fast path with a component-alpha (per-channel) mask: solid source
 * multiplied by the 8888 mask, added saturating into the 8888 destination.
 * NOTE(review): elided listing — loop headers/braces are missing from view.
 */
313 fast_composite_add_n_8888_8888_ca (pixman_implementation_t *imp,
314 pixman_composite_info_t *info)
316 PIXMAN_COMPOSITE_ARGS (info);
318 uint32_t *dst_line, *dst, d;
319 uint32_t *mask_line, *mask, ma;
320 int dst_stride, mask_stride;
323 src = _pixman_image_get_solid (imp, src_image, dest_image->bits.format);
328 PIXMAN_IMAGE_GET_LINE (dest_image, dest_x, dest_y, uint32_t, dst_stride, dst_line, 1);
329 PIXMAN_IMAGE_GET_LINE (mask_image, mask_x, mask_y, uint32_t, mask_stride, mask_line, 1);
334 dst_line += dst_stride;
336 mask_line += mask_stride;
/* s = s * ma + d, per channel with UN8 saturation. */
348 UN8x4_MUL_UN8x4_ADD_UN8x4 (s, ma, d);
/*
 * OVER fast path with a component-alpha mask onto an 8888 destination.
 * Fully-opaque mask pixels take the plain `over` shortcut; otherwise the
 * source is multiplied by the mask and the destination by the mask scaled
 * with the source alpha (standard CA OVER formulation).
 */
359 fast_composite_over_n_8888_8888_ca (pixman_implementation_t *imp,
360 pixman_composite_info_t *info)
362 PIXMAN_COMPOSITE_ARGS (info);
363 uint32_t src, srca, s;
364 uint32_t *dst_line, *dst, d;
365 uint32_t *mask_line, *mask, ma;
366 int dst_stride, mask_stride;
369 src = _pixman_image_get_solid (imp, src_image, dest_image->bits.format);
375 PIXMAN_IMAGE_GET_LINE (dest_image, dest_x, dest_y, uint32_t, dst_stride, dst_line, 1);
376 PIXMAN_IMAGE_GET_LINE (mask_image, mask_x, mask_y, uint32_t, mask_stride, mask_line, 1);
381 dst_line += dst_stride;
383 mask_line += mask_stride;
389 if (ma == 0xffffffff)
394 *dst = over (src, *dst);
/* General CA case: s = src*ma; ma = ma*srca; d = d*~ma + s (elided). */
401 UN8x4_MUL_UN8x4 (s, ma);
402 UN8x4_MUL_UN8 (ma, srca);
404 UN8x4_MUL_UN8x4_ADD_UN8x4 (d, ma, s);
/*
 * OVER fast path: solid source with an a8 mask onto a 24bpp (0888)
 * destination; note the dest line is fetched with a bpp multiplier of 3
 * and pixels go through fetch_24 / the 24-bit store helper.
 */
415 fast_composite_over_n_8_0888 (pixman_implementation_t *imp,
416 pixman_composite_info_t *info)
418 PIXMAN_COMPOSITE_ARGS (info);
420 uint8_t *dst_line, *dst;
422 uint8_t *mask_line, *mask, m;
423 int dst_stride, mask_stride;
426 src = _pixman_image_get_solid (imp, src_image, dest_image->bits.format);
432 PIXMAN_IMAGE_GET_LINE (dest_image, dest_x, dest_y, uint8_t, dst_stride, dst_line, 3);
433 PIXMAN_IMAGE_GET_LINE (mask_image, mask_x, mask_y, uint8_t, mask_stride, mask_line, 1);
438 dst_line += dst_stride;
440 mask_line += mask_stride;
/* Composite mask-attenuated solid over the fetched 24-bit dest pixel. */
461 d = over (in (src, m), fetch_24 (dst));
/*
 * OVER fast path: solid source with an a8 mask onto an r5g6b5 destination.
 * Each touched dest pixel is widened to 0888, composited, and narrowed back.
 * NOTE(review): elided listing — the mask-value branches are missing here.
 */
470 fast_composite_over_n_8_0565 (pixman_implementation_t *imp,
471 pixman_composite_info_t *info)
473 PIXMAN_COMPOSITE_ARGS (info);
475 uint16_t *dst_line, *dst;
477 uint8_t *mask_line, *mask, m;
478 int dst_stride, mask_stride;
481 src = _pixman_image_get_solid (imp, src_image, dest_image->bits.format);
487 PIXMAN_IMAGE_GET_LINE (dest_image, dest_x, dest_y, uint16_t, dst_stride, dst_line, 1);
488 PIXMAN_IMAGE_GET_LINE (mask_image, mask_x, mask_y, uint8_t, mask_stride, mask_line, 1);
493 dst_line += dst_stride;
495 mask_line += mask_stride;
/* Opaque-mask shortcut. */
510 d = over (src, convert_0565_to_0888 (d));
512 *dst = convert_8888_to_0565 (d);
/* General masked case. */
517 d = over (in (src, m), convert_0565_to_0888 (d));
518 *dst = convert_8888_to_0565 (d);
/*
 * OVER fast path with a component-alpha mask onto an r5g6b5 destination.
 * Dest pixels are widened to 0888 for the CA arithmetic, then narrowed back;
 * src16 holds the 0565-converted solid (presumably for an opaque shortcut —
 * that use is elided from this view).
 */
526 fast_composite_over_n_8888_0565_ca (pixman_implementation_t *imp,
527 pixman_composite_info_t *info)
529 PIXMAN_COMPOSITE_ARGS (info);
530 uint32_t src, srca, s;
532 uint16_t *dst_line, *dst;
534 uint32_t *mask_line, *mask, ma;
535 int dst_stride, mask_stride;
538 src = _pixman_image_get_solid (imp, src_image, dest_image->bits.format);
544 src16 = convert_8888_to_0565 (src);
546 PIXMAN_IMAGE_GET_LINE (dest_image, dest_x, dest_y, uint16_t, dst_stride, dst_line, 1);
547 PIXMAN_IMAGE_GET_LINE (mask_image, mask_x, mask_y, uint32_t, mask_stride, mask_line, 1);
552 dst_line += dst_stride;
554 mask_line += mask_stride;
560 if (ma == 0xffffffff)
569 d = over (src, convert_0565_to_0888 (d));
570 *dst = convert_8888_to_0565 (d);
576 d = convert_0565_to_0888 (d);
/* General CA case, as in the 8888 variant above. */
580 UN8x4_MUL_UN8x4 (s, ma);
581 UN8x4_MUL_UN8 (ma, srca);
583 UN8x4_MUL_UN8x4_ADD_UN8x4 (d, ma, s);
585 *dst = convert_8888_to_0565 (d);
/*
 * OVER fast path: a8r8g8b8 source onto an 8888 destination, no mask.
 * NOTE(review): elided listing — loop headers/braces are missing from view.
 */
593 fast_composite_over_8888_8888 (pixman_implementation_t *imp,
594 pixman_composite_info_t *info)
596 PIXMAN_COMPOSITE_ARGS (info);
597 uint32_t *dst_line, *dst;
598 uint32_t *src_line, *src, s;
599 int dst_stride, src_stride;
603 PIXMAN_IMAGE_GET_LINE (dest_image, dest_x, dest_y, uint32_t, dst_stride, dst_line, 1);
604 PIXMAN_IMAGE_GET_LINE (src_image, src_x, src_y, uint32_t, src_stride, src_line, 1);
609 dst_line += dst_stride;
611 src_line += src_stride;
621 *dst = over (s, *dst);
/*
 * SRC fast path: copy x888 pixels to an 8888 destination, forcing the
 * alpha channel to 0xff on the way.
 */
628 fast_composite_src_x888_8888 (pixman_implementation_t *imp,
629 pixman_composite_info_t *info)
631 PIXMAN_COMPOSITE_ARGS (info);
632 uint32_t *dst_line, *dst;
633 uint32_t *src_line, *src;
634 int dst_stride, src_stride;
637 PIXMAN_IMAGE_GET_LINE (dest_image, dest_x, dest_y, uint32_t, dst_stride, dst_line, 1);
638 PIXMAN_IMAGE_GET_LINE (src_image, src_x, src_y, uint32_t, src_stride, src_line, 1);
643 dst_line += dst_stride;
645 src_line += src_stride;
649 *dst++ = (*src++) | 0xff000000;
/*
 * OVER fast path: a8r8g8b8 source onto a 24bpp (0888) destination
 * (dest line fetched with bpp multiplier 3; pixels go through fetch_24).
 */
655 fast_composite_over_8888_0888 (pixman_implementation_t *imp,
656 pixman_composite_info_t *info)
658 PIXMAN_COMPOSITE_ARGS (info);
659 uint8_t *dst_line, *dst;
661 uint32_t *src_line, *src, s;
663 int dst_stride, src_stride;
666 PIXMAN_IMAGE_GET_LINE (dest_image, dest_x, dest_y, uint8_t, dst_stride, dst_line, 3);
667 PIXMAN_IMAGE_GET_LINE (src_image, src_x, src_y, uint32_t, src_stride, src_line, 1);
672 dst_line += dst_stride;
674 src_line += src_stride;
686 d = over (s, fetch_24 (dst));
/*
 * OVER fast path: a8r8g8b8 source onto an r5g6b5 destination; dest pixels
 * are widened to 0888, composited, and converted back to 0565.
 */
697 fast_composite_over_8888_0565 (pixman_implementation_t *imp,
698 pixman_composite_info_t *info)
700 PIXMAN_COMPOSITE_ARGS (info);
701 uint16_t *dst_line, *dst;
703 uint32_t *src_line, *src, s;
705 int dst_stride, src_stride;
708 PIXMAN_IMAGE_GET_LINE (src_image, src_x, src_y, uint32_t, src_stride, src_line, 1);
709 PIXMAN_IMAGE_GET_LINE (dest_image, dest_x, dest_y, uint16_t, dst_stride, dst_line, 1);
714 dst_line += dst_stride;
716 src_line += src_stride;
732 d = over (s, convert_0565_to_0888 (d));
734 *dst = convert_8888_to_0565 (d);
/*
 * ADD fast path: a8 + a8 with saturation.  The visible expression
 * `t | (0 - (t >> 8))` clamps a 9-bit sum to 0xff without a branch.
 */
742 fast_composite_add_8_8 (pixman_implementation_t *imp,
743 pixman_composite_info_t *info)
745 PIXMAN_COMPOSITE_ARGS (info);
746 uint8_t *dst_line, *dst;
747 uint8_t *src_line, *src;
748 int dst_stride, src_stride;
753 PIXMAN_IMAGE_GET_LINE (src_image, src_x, src_y, uint8_t, src_stride, src_line, 1);
754 PIXMAN_IMAGE_GET_LINE (dest_image, dest_x, dest_y, uint8_t, dst_stride, dst_line, 1);
759 dst_line += dst_stride;
761 src_line += src_stride;
/* Branch-free saturating clamp of the 9-bit sum t. */
773 s = t | (0 - (t >> 8));
/*
 * ADD fast path for r5g6b5: widen both pixels to 8888, add with per-channel
 * saturation, and narrow the result back to 0565.
 */
783 fast_composite_add_0565_0565 (pixman_implementation_t *imp,
784 pixman_composite_info_t *info)
786 PIXMAN_COMPOSITE_ARGS (info);
787 uint16_t *dst_line, *dst;
789 uint16_t *src_line, *src;
791 int dst_stride, src_stride;
794 PIXMAN_IMAGE_GET_LINE (src_image, src_x, src_y, uint16_t, src_stride, src_line, 1);
795 PIXMAN_IMAGE_GET_LINE (dest_image, dest_x, dest_y, uint16_t, dst_stride, dst_line, 1);
800 dst_line += dst_stride;
802 src_line += src_stride;
811 s = convert_0565_to_8888 (s);
814 d = convert_0565_to_8888 (d);
815 UN8x4_ADD_UN8x4 (s, d);
817 *dst = convert_8888_to_0565 (s);
/*
 * ADD fast path: 8888 + 8888 with per-channel UN8 saturation.
 * NOTE(review): elided listing — loop headers/braces are missing from view.
 */
825 fast_composite_add_8888_8888 (pixman_implementation_t *imp,
826 pixman_composite_info_t *info)
828 PIXMAN_COMPOSITE_ARGS (info);
829 uint32_t *dst_line, *dst;
830 uint32_t *src_line, *src;
831 int dst_stride, src_stride;
835 PIXMAN_IMAGE_GET_LINE (src_image, src_x, src_y, uint32_t, src_stride, src_line, 1);
836 PIXMAN_IMAGE_GET_LINE (dest_image, dest_x, dest_y, uint32_t, dst_stride, dst_line, 1);
841 dst_line += dst_stride;
843 src_line += src_stride;
855 UN8x4_ADD_UN8x4 (s, d);
/*
 * ADD fast path: solid source through an a8 mask into an a8 destination —
 * each dest byte gets MUL_UN8(src.alpha, mask) added with saturation.
 */
865 fast_composite_add_n_8_8 (pixman_implementation_t *imp,
866 pixman_composite_info_t *info)
868 PIXMAN_COMPOSITE_ARGS (info);
869 uint8_t *dst_line, *dst;
870 uint8_t *mask_line, *mask;
871 int dst_stride, mask_stride;
876 PIXMAN_IMAGE_GET_LINE (dest_image, dest_x, dest_y, uint8_t, dst_stride, dst_line, 1);
877 PIXMAN_IMAGE_GET_LINE (mask_image, mask_x, mask_y, uint8_t, mask_stride, mask_line, 1);
878 src = _pixman_image_get_solid (imp, src_image, dest_image->bits.format);
884 dst_line += dst_stride;
886 mask_line += mask_stride;
/* m = sa * a (mask scaled by source alpha); r = m + d, saturating. */
899 m = MUL_UN8 (sa, a, tmp);
900 r = ADD_UN8 (m, d, tmp);
/*
 * Single-bit access helpers for a1 images.  Bit order within each 32-bit
 * word depends on host endianness: MSB-first on big-endian, LSB-first on
 * little-endian.  TEST_BIT/SET_BIT index bit n of the word array p.
 */
#ifdef WORDS_BIGENDIAN
908 #define CREATE_BITMASK(n) (0x80000000 >> (n))
909 #define UPDATE_BITMASK(n) ((n) >> 1)
911 #define CREATE_BITMASK(n) (1 << (n))
912 #define UPDATE_BITMASK(n) ((n) << 1)
915 #define TEST_BIT(p, n) \
916 (*((p) + ((n) >> 5)) & CREATE_BITMASK ((n) & 31))
917 #define SET_BIT(p, n) \
918 do { *((p) + ((n) >> 5)) |= CREATE_BITMASK ((n) & 31); } while (0);
/*
 * ADD fast path for a1 bitmaps: ORs set source bits into the destination,
 * one bit at a time via TEST_BIT/SET_BIT.  Lines are fetched at x = 0 and
 * the per-pixel x offsets (src_x + w, dest_x + w) are applied bitwise.
 */
921 fast_composite_add_1_1 (pixman_implementation_t *imp,
922 pixman_composite_info_t *info)
924 PIXMAN_COMPOSITE_ARGS (info);
925 uint32_t *dst_line, *dst;
926 uint32_t *src_line, *src;
927 int dst_stride, src_stride;
930 PIXMAN_IMAGE_GET_LINE (src_image, 0, src_y, uint32_t,
931 src_stride, src_line, 1);
932 PIXMAN_IMAGE_GET_LINE (dest_image, 0, dest_y, uint32_t,
933 dst_stride, dst_line, 1);
938 dst_line += dst_stride;
940 src_line += src_stride;
946 * TODO: improve performance by processing uint32_t data instead
949 if (TEST_BIT (src, src_x + w))
950 SET_BIT (dst, dest_x + w);
/*
 * OVER fast path: solid source with an a1 mask onto an 8888 destination.
 * The mask word is cached in `bitcache` and walked with `bitmask`
 * (CREATE_BITMASK / UPDATE_BITMASK handle the endian-dependent bit order).
 * Two loop bodies are visible — presumably an opaque-solid fill path and a
 * general `over` path; the dividing condition is elided from this view.
 */
956 fast_composite_over_n_1_8888 (pixman_implementation_t *imp,
957 pixman_composite_info_t *info)
959 PIXMAN_COMPOSITE_ARGS (info);
961 uint32_t *dst, *dst_line;
962 uint32_t *mask, *mask_line;
963 int mask_stride, dst_stride;
964 uint32_t bitcache, bitmask;
970 src = _pixman_image_get_solid (imp, src_image, dest_image->bits.format);
975 PIXMAN_IMAGE_GET_LINE (dest_image, dest_x, dest_y, uint32_t,
976 dst_stride, dst_line, 1);
977 PIXMAN_IMAGE_GET_LINE (mask_image, 0, mask_y, uint32_t,
978 mask_stride, mask_line, 1);
/* Advance to the 32-bit word containing the first mask bit. */
979 mask_line += mask_x >> 5;
986 dst_line += dst_stride;
988 mask_line += mask_stride;
992 bitmask = CREATE_BITMASK (mask_x & 31);
999 bitmask = CREATE_BITMASK (0);
1001 if (bitcache & bitmask)
1003 bitmask = UPDATE_BITMASK (bitmask);
1013 dst_line += dst_stride;
1015 mask_line += mask_stride;
1019 bitmask = CREATE_BITMASK (mask_x & 31);
1026 bitmask = CREATE_BITMASK (0);
1028 if (bitcache & bitmask)
1029 *dst = over (src, *dst);
1030 bitmask = UPDATE_BITMASK (bitmask);
/*
 * OVER fast path: solid source with an a1 mask onto an r5g6b5 destination.
 * Same bitcache/bitmask walk as the 8888 variant; src565 holds the
 * 0565-converted solid (presumably for an opaque fill path — its use is
 * elided from this view), the general path widens/narrows each dest pixel.
 */
1038 fast_composite_over_n_1_0565 (pixman_implementation_t *imp,
1039 pixman_composite_info_t *info)
1041 PIXMAN_COMPOSITE_ARGS (info);
1043 uint16_t *dst, *dst_line;
1044 uint32_t *mask, *mask_line;
1045 int mask_stride, dst_stride;
1046 uint32_t bitcache, bitmask;
1054 src = _pixman_image_get_solid (imp, src_image, dest_image->bits.format);
1059 PIXMAN_IMAGE_GET_LINE (dest_image, dest_x, dest_y, uint16_t,
1060 dst_stride, dst_line, 1);
1061 PIXMAN_IMAGE_GET_LINE (mask_image, 0, mask_y, uint32_t,
1062 mask_stride, mask_line, 1);
1063 mask_line += mask_x >> 5;
1067 src565 = convert_8888_to_0565 (src);
1071 dst_line += dst_stride;
1073 mask_line += mask_stride;
1077 bitmask = CREATE_BITMASK (mask_x & 31);
1084 bitmask = CREATE_BITMASK (0);
1086 if (bitcache & bitmask)
1088 bitmask = UPDATE_BITMASK (bitmask);
1098 dst_line += dst_stride;
1100 mask_line += mask_stride;
1104 bitmask = CREATE_BITMASK (mask_x & 31);
1111 bitmask = CREATE_BITMASK (0);
1113 if (bitcache & bitmask)
1115 d = over (src, convert_0565_to_0888 (*dst));
1116 *dst = convert_8888_to_0565 (d);
1118 bitmask = UPDATE_BITMASK (bitmask);
/*
 * SRC solid fill: converts the solid to the destination format's width
 * (a1/a8/0565 cases visible) and delegates to pixman_fill.
 */
1130 fast_composite_solid_fill (pixman_implementation_t *imp,
1131 pixman_composite_info_t *info)
1133 PIXMAN_COMPOSITE_ARGS (info);
1136 src = _pixman_image_get_solid (imp, src_image, dest_image->bits.format);
1138 if (dest_image->bits.format == PIXMAN_a1)
1142 else if (dest_image->bits.format == PIXMAN_a8)
1146 else if (dest_image->bits.format == PIXMAN_r5g6b5 ||
1147 dest_image->bits.format == PIXMAN_b5g6r5)
1149 src = convert_8888_to_0565 (src);
1152 pixman_fill (dest_image->bits.bits, dest_image->bits.rowstride,
1153 PIXMAN_FORMAT_BPP (dest_image->bits.format),
/*
 * SRC fast path for same-format copies: raw per-row memcpy.  Strides are
 * converted from uint32 units to bytes (rowstride * 4) and the start
 * addresses computed in bytes from the pixel coordinates.
 */
1160 fast_composite_src_memcpy (pixman_implementation_t *imp,
1161 pixman_composite_info_t *info)
1163 PIXMAN_COMPOSITE_ARGS (info);
1164 int bpp = PIXMAN_FORMAT_BPP (dest_image->bits.format) / 8;
1165 uint32_t n_bytes = width * bpp;
1166 int dst_stride, src_stride;
1170 src_stride = src_image->bits.rowstride * 4;
1171 dst_stride = dest_image->bits.rowstride * 4;
1173 src = (uint8_t *)src_image->bits.bits + src_y * src_stride + src_x * bpp;
1174 dst = (uint8_t *)dest_image->bits.bits + dest_y * dst_stride + dest_x * bpp;
1178 memcpy (dst, src, n_bytes);
/*
 * Template instantiations of nearest-neighbour scaling fast paths
 * (FAST_NEAREST from pixman-inlines.h): one per source/dest format pair,
 * compositing op (SRC/OVER) and repeat mode (COVER/NONE/PAD/NORMAL).
 */
1185 FAST_NEAREST (8888_8888_cover, 8888, 8888, uint32_t, uint32_t, SRC, COVER)
1186 FAST_NEAREST (8888_8888_none, 8888, 8888, uint32_t, uint32_t, SRC, NONE)
1187 FAST_NEAREST (8888_8888_pad, 8888, 8888, uint32_t, uint32_t, SRC, PAD)
1188 FAST_NEAREST (8888_8888_normal, 8888, 8888, uint32_t, uint32_t, SRC, NORMAL)
1189 FAST_NEAREST (x888_8888_cover, x888, 8888, uint32_t, uint32_t, SRC, COVER)
1190 FAST_NEAREST (x888_8888_pad, x888, 8888, uint32_t, uint32_t, SRC, PAD)
1191 FAST_NEAREST (x888_8888_normal, x888, 8888, uint32_t, uint32_t, SRC, NORMAL)
1192 FAST_NEAREST (8888_8888_cover, 8888, 8888, uint32_t, uint32_t, OVER, COVER)
1193 FAST_NEAREST (8888_8888_none, 8888, 8888, uint32_t, uint32_t, OVER, NONE)
1194 FAST_NEAREST (8888_8888_pad, 8888, 8888, uint32_t, uint32_t, OVER, PAD)
1195 FAST_NEAREST (8888_8888_normal, 8888, 8888, uint32_t, uint32_t, OVER, NORMAL)
1196 FAST_NEAREST (8888_565_cover, 8888, 0565, uint32_t, uint16_t, SRC, COVER)
1197 FAST_NEAREST (8888_565_none, 8888, 0565, uint32_t, uint16_t, SRC, NONE)
1198 FAST_NEAREST (8888_565_pad, 8888, 0565, uint32_t, uint16_t, SRC, PAD)
1199 FAST_NEAREST (8888_565_normal, 8888, 0565, uint32_t, uint16_t, SRC, NORMAL)
1200 FAST_NEAREST (565_565_normal, 0565, 0565, uint16_t, uint16_t, SRC, NORMAL)
1201 FAST_NEAREST (8888_565_cover, 8888, 0565, uint32_t, uint16_t, OVER, COVER)
1202 FAST_NEAREST (8888_565_none, 8888, 0565, uint32_t, uint16_t, OVER, NONE)
1203 FAST_NEAREST (8888_565_pad, 8888, 0565, uint32_t, uint16_t, OVER, PAD)
1204 FAST_NEAREST (8888_565_normal, 8888, 0565, uint32_t, uint16_t, OVER, NORMAL)
/* Minimum effective source width for the tiled-repeat fast path below;
 * narrower sources get replicated into a stack buffer of this size. */
1206 #define REPEAT_MIN_WIDTH 32
/*
 * Decomposes a NORMAL-repeat composite into a sequence of cover-clip
 * composites: looks up the equivalent non-repeating fast path, then walks
 * the destination in chunks of at most one source-tile width.  Sources
 * narrower than REPEAT_MIN_WIDTH (at 8/16/32 bpp, non-indexed) are first
 * replicated into a one-row stack buffer (`extended_src`) wrapped in a
 * stack-allocated pixman_image so each chunk is wider and chunk overhead
 * shrinks.  NOTE(review): elided listing — several control-flow lines and
 * the actual func invocation are missing from this view.
 */
1209 fast_composite_tiled_repeat (pixman_implementation_t *imp,
1210 pixman_composite_info_t *info)
1212 PIXMAN_COMPOSITE_ARGS (info);
1213 pixman_composite_func_t func;
1214 pixman_format_code_t mask_format;
1215 uint32_t src_flags, mask_flags;
1217 int32_t width_remain;
1221 pixman_image_t extended_src_image;
1222 uint32_t extended_src[REPEAT_MIN_WIDTH * 2];
1223 pixman_bool_t need_src_extension;
1227 pixman_composite_info_t info2 = *info;
/* Drop NORMAL_REPEAT and claim cover-clip so the lookup finds the plain path. */
1229 src_flags = (info->src_flags & ~FAST_PATH_NORMAL_REPEAT) |
1230 FAST_PATH_SAMPLES_COVER_CLIP_NEAREST;
1234 mask_format = mask_image->common.extended_format_code;
1235 mask_flags = info->mask_flags;
1239 mask_format = PIXMAN_null;
1240 mask_flags = FAST_PATH_IS_OPAQUE;
1243 _pixman_implementation_lookup_composite (
1244 imp->toplevel, info->op,
1245 src_image->common.extended_format_code, src_flags,
1246 mask_format, mask_flags,
1247 dest_image->common.extended_format_code, info->dest_flags,
1250 src_bpp = PIXMAN_FORMAT_BPP (src_image->bits.format);
1252 if (src_image->bits.width < REPEAT_MIN_WIDTH &&
1253 (src_bpp == 32 || src_bpp == 16 || src_bpp == 8) &&
1254 !src_image->bits.indexed)
1257 sx = MOD (sx, src_image->bits.width);
/* Grow the extended width until it covers sx and reaches the minimum. */
1261 while (src_width < REPEAT_MIN_WIDTH && src_width <= sx)
1262 src_width += src_image->bits.width;
1264 src_stride = (src_width * (src_bpp >> 3) + 3) / (int) sizeof (uint32_t);
1266 /* Initialize/validate stack-allocated temporary image */
1267 _pixman_bits_image_init (&extended_src_image, src_image->bits.format,
1268 src_width, 1, &extended_src[0], src_stride,
1270 _pixman_image_validate (&extended_src_image);
1272 info2.src_image = &extended_src_image;
1273 need_src_extension = TRUE;
1277 src_width = src_image->bits.width;
1278 need_src_extension = FALSE;
1284 while (--height >= 0)
1286 sx = MOD (sx, src_width);
1287 sy = MOD (sy, src_image->bits.height);
/* Refill the one-row extended source from the current source row,
 * replicating it at the source's native pixel width (32/16/8 bpp). */
1289 if (need_src_extension)
1293 PIXMAN_IMAGE_GET_LINE (src_image, 0, sy, uint32_t, src_stride, src_line, 1);
1295 for (i = 0; i < src_width; )
1297 for (j = 0; j < src_image->bits.width; j++, i++)
1298 extended_src[i] = src_line[j];
1301 else if (src_bpp == 16)
1303 uint16_t *src_line_16;
1305 PIXMAN_IMAGE_GET_LINE (src_image, 0, sy, uint16_t, src_stride,
1307 src_line = (uint32_t*)src_line_16;
1309 for (i = 0; i < src_width; )
1311 for (j = 0; j < src_image->bits.width; j++, i++)
1312 ((uint16_t*)extended_src)[i] = ((uint16_t*)src_line)[j];
1315 else if (src_bpp == 8)
1317 uint8_t *src_line_8;
1319 PIXMAN_IMAGE_GET_LINE (src_image, 0, sy, uint8_t, src_stride,
1321 src_line = (uint32_t*)src_line_8;
1323 for (i = 0; i < src_width; )
1325 for (j = 0; j < src_image->bits.width; j++, i++)
1326 ((uint8_t*)extended_src)[i] = ((uint8_t*)src_line)[j];
/* Walk the row in chunks no wider than the remaining source tile. */
1337 width_remain = width;
1339 while (width_remain > 0)
1341 num_pixels = src_width - sx;
1343 if (num_pixels > width_remain)
1344 num_pixels = width_remain;
1347 info2.width = num_pixels;
1352 width_remain -= num_pixels;
1353 info2.mask_x += num_pixels;
1354 info2.dest_x += num_pixels;
/* Reset per-row coordinates for the next scanline. */
1360 info2.mask_x = info->mask_x;
1362 info2.dest_x = info->dest_x;
1366 if (need_src_extension)
1367 _pixman_image_fini (&extended_src_image);
/* Use more unrolling for src_0565_0565 because it is typically CPU bound */
/*
 * Nearest-neighbour SRC scanline for 0565 -> 0565: unrolled four pixels
 * per iteration, stepping the fixed-point source coordinate vx by unit_x
 * (the vx increments themselves are elided from this view), then the
 * remaining 0-3 pixels.  Instantiated below for COVER/NONE/PAD repeats.
 */
1371 static force_inline void
1372 scaled_nearest_scanline_565_565_SRC (uint16_t * dst,
1373 const uint16_t * src,
1376 pixman_fixed_t unit_x,
1377 pixman_fixed_t max_vx,
1378 pixman_bool_t fully_transparent_src)
1380 uint16_t tmp1, tmp2, tmp3, tmp4;
1381 while ((w -= 4) >= 0)
1383 tmp1 = *(src + pixman_fixed_to_int (vx));
1385 tmp2 = *(src + pixman_fixed_to_int (vx));
1387 tmp3 = *(src + pixman_fixed_to_int (vx));
1389 tmp4 = *(src + pixman_fixed_to_int (vx));
1398 tmp1 = *(src + pixman_fixed_to_int (vx));
1400 tmp2 = *(src + pixman_fixed_to_int (vx));
1406 *dst = *(src + pixman_fixed_to_int (vx));
1409 FAST_NEAREST_MAINLOOP (565_565_cover_SRC,
1410 scaled_nearest_scanline_565_565_SRC,
1411 uint16_t, uint16_t, COVER)
1412 FAST_NEAREST_MAINLOOP (565_565_none_SRC,
1413 scaled_nearest_scanline_565_565_SRC,
1414 uint16_t, uint16_t, NONE)
1415 FAST_NEAREST_MAINLOOP (565_565_pad_SRC,
1416 scaled_nearest_scanline_565_565_SRC,
1417 uint16_t, uint16_t, PAD)
/*
 * Helpers for the generic nearest-scaling path below.
 * fetch_nearest: resolve repeat for coordinate x and read one pixel,
 * forcing x888 formats opaque.  The out-of-bounds (repeat failed) return
 * is elided from this view — presumably transparent black.
 */
1419 static force_inline uint32_t
1420 fetch_nearest (pixman_repeat_t src_repeat,
1421 pixman_format_code_t format,
1422 uint32_t *src, int x, int src_width)
1424 if (repeat (src_repeat, &x, src_width))
1426 if (format == PIXMAN_x8r8g8b8 || format == PIXMAN_x8b8g8r8)
1427 return *(src + x) | 0xff000000;
/* combine_over: in-place OVER of s onto *dst using the inverted source
 * alpha (ia); the opaque shortcut branch is elided from this view. */
1437 static force_inline void
1438 combine_over (uint32_t s, uint32_t *dst)
1442 uint8_t ia = 0xff - (s >> 24);
1445 UN8x4_MUL_UN8_ADD_UN8x4 (*dst, ia, s);
/* combine_src: plain store of s into *dst (body elided). */
1451 static force_inline void
1452 combine_src (uint32_t s, uint32_t *dst)
/*
 * Generic nearest-neighbour scaled composite for 8888 formats, SRC or OVER.
 * Transforms the destination origin into source space (pixel-center
 * convention), then steps vx/vy by the transform's diagonal units,
 * resolving the repeat mode per pixel/row via repeat()/fetch_nearest.
 * Rows that fall outside the source under NONE repeat are cleared for
 * SRC (memset) or skipped for OVER.  The inner loop is unrolled two
 * pixels at a time with a single-pixel tail.
 */
1458 fast_composite_scaled_nearest (pixman_implementation_t *imp,
1459 pixman_composite_info_t *info)
1461 PIXMAN_COMPOSITE_ARGS (info);
1464 int dst_stride, src_stride;
1465 int src_width, src_height;
1466 pixman_repeat_t src_repeat;
1467 pixman_fixed_t unit_x, unit_y;
1468 pixman_format_code_t src_format;
1472 PIXMAN_IMAGE_GET_LINE (dest_image, dest_x, dest_y, uint32_t, dst_stride, dst_line, 1);
1473 /* pass in 0 instead of src_x and src_y because src_x and src_y need to be
1474 * transformed from destination space to source space
1476 PIXMAN_IMAGE_GET_LINE (src_image, 0, 0, uint32_t, src_stride, src_line, 1);
1478 /* reference point is the center of the pixel */
1479 v.vector[0] = pixman_int_to_fixed (src_x) + pixman_fixed_1 / 2;
1480 v.vector[1] = pixman_int_to_fixed (src_y) + pixman_fixed_1 / 2;
1481 v.vector[2] = pixman_fixed_1;
1483 if (!pixman_transform_point_3d (src_image->common.transform, &v))
1486 unit_x = src_image->common.transform->matrix[0][0];
1487 unit_y = src_image->common.transform->matrix[1][1];
1489 /* Round down to closest integer, ensuring that 0.5 rounds to 0, not 1 */
1490 v.vector[0] -= pixman_fixed_e;
1491 v.vector[1] -= pixman_fixed_e;
1493 src_height = src_image->bits.height;
1494 src_width = src_image->bits.width;
1495 src_repeat = src_image->common.repeat;
1496 src_format = src_image->bits.format;
1501 pixman_fixed_t vx = v.vector[0];
1502 int y = pixman_fixed_to_int (vy);
1503 uint32_t *dst = dst_line;
1505 dst_line += dst_stride;
1507 /* adjust the y location by a unit vector in the y direction
1508 * this is equivalent to transforming y+1 of the destination point to source space */
1511 if (!repeat (src_repeat, &y, src_height))
1513 if (op == PIXMAN_OP_SRC)
1514 memset (dst, 0, sizeof (*dst) * width);
1520 uint32_t *src = src_line + y * src_stride;
/* Two-pixel unrolled inner loop. */
1527 x1 = pixman_fixed_to_int (vx);
1530 x2 = pixman_fixed_to_int (vx);
1535 s1 = fetch_nearest (src_repeat, src_format, src, x1, src_width);
1536 s2 = fetch_nearest (src_repeat, src_format, src, x2, src_width);
1538 if (op == PIXMAN_OP_OVER)
1540 combine_over (s1, dst++);
1541 combine_over (s2, dst++);
1545 combine_src (s1, dst++);
1546 combine_src (s2, dst++);
/* Odd trailing pixel. */
1555 x = pixman_fixed_to_int (vx);
1558 s = fetch_nearest (src_repeat, src_format, src, x, src_width);
1560 if (op == PIXMAN_OP_OVER)
1561 combine_over (s, dst++);
1563 combine_src (s, dst++);
/*
 * FAST_SIMPLE_ROTATE(suffix, pix_type): generates 90° and 270° rotation
 * blitters for one pixel type.  The *_trivial_* variants do the plain
 * transposed copy; blt_rotated_90/270 wrap them with cache-line tiling:
 * the destination is split into a possibly-unaligned leading stripe, a run
 * of TILE_SIZE-wide cache-line-aligned stripes, and an unaligned trailing
 * stripe, to keep destination writes within cache lines.  The
 * fast_composite_rotate_* entry points recover the rotated source origin
 * from the image transform's translation column.  Instantiated for 8, 565
 * and 8888 below.  (Comments added outside the macro only, to avoid
 * disturbing line continuations.)
 */
1569 #define CACHE_LINE_SIZE 64
1571 #define FAST_SIMPLE_ROTATE(suffix, pix_type) \
1574 blt_rotated_90_trivial_##suffix (pix_type *dst, \
1576 const pix_type *src, \
1582 for (y = 0; y < h; y++) \
1584 const pix_type *s = src + (h - y - 1); \
1585 pix_type *d = dst + dst_stride * y; \
1586 for (x = 0; x < w; x++) \
1595 blt_rotated_270_trivial_##suffix (pix_type *dst, \
1597 const pix_type *src, \
1603 for (y = 0; y < h; y++) \
1605 const pix_type *s = src + src_stride * (w - 1) + y; \
1606 pix_type *d = dst + dst_stride * y; \
1607 for (x = 0; x < w; x++) \
1616 blt_rotated_90_##suffix (pix_type *dst, \
1618 const pix_type *src, \
1624 int leading_pixels = 0, trailing_pixels = 0; \
1625 const int TILE_SIZE = CACHE_LINE_SIZE / sizeof(pix_type); \
1628 * split processing into handling destination as TILE_SIZExH cache line \
1629 * aligned vertical stripes (optimistically assuming that destination \
1630 * stride is a multiple of cache line, if not - it will be just a bit \
1634 if ((uintptr_t)dst & (CACHE_LINE_SIZE - 1)) \
1636 leading_pixels = TILE_SIZE - (((uintptr_t)dst & \
1637 (CACHE_LINE_SIZE - 1)) / sizeof(pix_type)); \
1638 if (leading_pixels > W) \
1639 leading_pixels = W; \
1641 /* unaligned leading part NxH (where N < TILE_SIZE) */ \
1642 blt_rotated_90_trivial_##suffix ( \
1650 dst += leading_pixels; \
1651 src += leading_pixels * src_stride; \
1652 W -= leading_pixels; \
1655 if ((uintptr_t)(dst + W) & (CACHE_LINE_SIZE - 1)) \
1657 trailing_pixels = (((uintptr_t)(dst + W) & \
1658 (CACHE_LINE_SIZE - 1)) / sizeof(pix_type)); \
1659 if (trailing_pixels > W) \
1660 trailing_pixels = W; \
1661 W -= trailing_pixels; \
1664 for (x = 0; x < W; x += TILE_SIZE) \
1666 /* aligned middle part TILE_SIZExH */ \
1667 blt_rotated_90_trivial_##suffix ( \
1670 src + src_stride * x, \
1676 if (trailing_pixels) \
1678 /* unaligned trailing part NxH (where N < TILE_SIZE) */ \
1679 blt_rotated_90_trivial_##suffix ( \
1682 src + W * src_stride, \
1690 blt_rotated_270_##suffix (pix_type *dst, \
1692 const pix_type *src, \
1698 int leading_pixels = 0, trailing_pixels = 0; \
1699 const int TILE_SIZE = CACHE_LINE_SIZE / sizeof(pix_type); \
1702 * split processing into handling destination as TILE_SIZExH cache line \
1703 * aligned vertical stripes (optimistically assuming that destination \
1704 * stride is a multiple of cache line, if not - it will be just a bit \
1708 if ((uintptr_t)dst & (CACHE_LINE_SIZE - 1)) \
1710 leading_pixels = TILE_SIZE - (((uintptr_t)dst & \
1711 (CACHE_LINE_SIZE - 1)) / sizeof(pix_type)); \
1712 if (leading_pixels > W) \
1713 leading_pixels = W; \
1715 /* unaligned leading part NxH (where N < TILE_SIZE) */ \
1716 blt_rotated_270_trivial_##suffix ( \
1719 src + src_stride * (W - leading_pixels), \
1724 dst += leading_pixels; \
1725 W -= leading_pixels; \
1728 if ((uintptr_t)(dst + W) & (CACHE_LINE_SIZE - 1)) \
1730 trailing_pixels = (((uintptr_t)(dst + W) & \
1731 (CACHE_LINE_SIZE - 1)) / sizeof(pix_type)); \
1732 if (trailing_pixels > W) \
1733 trailing_pixels = W; \
1734 W -= trailing_pixels; \
1735 src += trailing_pixels * src_stride; \
1738 for (x = 0; x < W; x += TILE_SIZE) \
1740 /* aligned middle part TILE_SIZExH */ \
1741 blt_rotated_270_trivial_##suffix ( \
1744 src + src_stride * (W - x - TILE_SIZE), \
1750 if (trailing_pixels) \
1752 /* unaligned trailing part NxH (where N < TILE_SIZE) */ \
1753 blt_rotated_270_trivial_##suffix ( \
1756 src - trailing_pixels * src_stride, \
1764 fast_composite_rotate_90_##suffix (pixman_implementation_t *imp, \
1765 pixman_composite_info_t *info) \
1767 PIXMAN_COMPOSITE_ARGS (info); \
1768 pix_type *dst_line; \
1769 pix_type *src_line; \
1770 int dst_stride, src_stride; \
1771 int src_x_t, src_y_t; \
1773 PIXMAN_IMAGE_GET_LINE (dest_image, dest_x, dest_y, pix_type, \
1774 dst_stride, dst_line, 1); \
1775 src_x_t = -src_y + pixman_fixed_to_int ( \
1776 src_image->common.transform->matrix[0][2] + \
1777 pixman_fixed_1 / 2 - pixman_fixed_e) - height;\
1778 src_y_t = src_x + pixman_fixed_to_int ( \
1779 src_image->common.transform->matrix[1][2] + \
1780 pixman_fixed_1 / 2 - pixman_fixed_e); \
1781 PIXMAN_IMAGE_GET_LINE (src_image, src_x_t, src_y_t, pix_type, \
1782 src_stride, src_line, 1); \
1783 blt_rotated_90_##suffix (dst_line, dst_stride, src_line, src_stride, \
1788 fast_composite_rotate_270_##suffix (pixman_implementation_t *imp, \
1789 pixman_composite_info_t *info) \
1791 PIXMAN_COMPOSITE_ARGS (info); \
1792 pix_type *dst_line; \
1793 pix_type *src_line; \
1794 int dst_stride, src_stride; \
1795 int src_x_t, src_y_t; \
1797 PIXMAN_IMAGE_GET_LINE (dest_image, dest_x, dest_y, pix_type, \
1798 dst_stride, dst_line, 1); \
1799 src_x_t = src_y + pixman_fixed_to_int ( \
1800 src_image->common.transform->matrix[0][2] + \
1801 pixman_fixed_1 / 2 - pixman_fixed_e); \
1802 src_y_t = -src_x + pixman_fixed_to_int ( \
1803 src_image->common.transform->matrix[1][2] + \
1804 pixman_fixed_1 / 2 - pixman_fixed_e) - width; \
1805 PIXMAN_IMAGE_GET_LINE (src_image, src_x_t, src_y_t, pix_type, \
1806 src_stride, src_line, 1); \
1807 blt_rotated_270_##suffix (dst_line, dst_stride, src_line, src_stride, \
1811 FAST_SIMPLE_ROTATE (8, uint8_t)
1812 FAST_SIMPLE_ROTATE (565, uint16_t)
1813 FAST_SIMPLE_ROTATE (8888, uint32_t)
/* Table of C fast paths.  Each entry matches a particular
 * (operator, source format, mask format, destination format)
 * combination and names the specialized composite routine to run for
 * it.  "solid" matches a constant-color source; "null" means no mask.
 */
1815 static const pixman_fast_path_t c_fast_paths[] =
1817 PIXMAN_STD_FAST_PATH (OVER, solid, a8, r5g6b5, fast_composite_over_n_8_0565),
1818 PIXMAN_STD_FAST_PATH (OVER, solid, a8, b5g6r5, fast_composite_over_n_8_0565),
1819 PIXMAN_STD_FAST_PATH (OVER, solid, a8, r8g8b8, fast_composite_over_n_8_0888),
1820 PIXMAN_STD_FAST_PATH (OVER, solid, a8, b8g8r8, fast_composite_over_n_8_0888),
1821 PIXMAN_STD_FAST_PATH (OVER, solid, a8, a8r8g8b8, fast_composite_over_n_8_8888),
1822 PIXMAN_STD_FAST_PATH (OVER, solid, a8, x8r8g8b8, fast_composite_over_n_8_8888),
1823 PIXMAN_STD_FAST_PATH (OVER, solid, a8, a8b8g8r8, fast_composite_over_n_8_8888),
1824 PIXMAN_STD_FAST_PATH (OVER, solid, a8, x8b8g8r8, fast_composite_over_n_8_8888),
1825 PIXMAN_STD_FAST_PATH (OVER, solid, a1, a8r8g8b8, fast_composite_over_n_1_8888),
1826 PIXMAN_STD_FAST_PATH (OVER, solid, a1, x8r8g8b8, fast_composite_over_n_1_8888),
1827 PIXMAN_STD_FAST_PATH (OVER, solid, a1, a8b8g8r8, fast_composite_over_n_1_8888),
1828 PIXMAN_STD_FAST_PATH (OVER, solid, a1, x8b8g8r8, fast_composite_over_n_1_8888),
1829 PIXMAN_STD_FAST_PATH (OVER, solid, a1, r5g6b5, fast_composite_over_n_1_0565),
1830 PIXMAN_STD_FAST_PATH (OVER, solid, a1, b5g6r5, fast_composite_over_n_1_0565),
1831 PIXMAN_STD_FAST_PATH_CA (OVER, solid, a8r8g8b8, a8r8g8b8, fast_composite_over_n_8888_8888_ca),
1832 PIXMAN_STD_FAST_PATH_CA (OVER, solid, a8r8g8b8, x8r8g8b8, fast_composite_over_n_8888_8888_ca),
1833 PIXMAN_STD_FAST_PATH_CA (OVER, solid, a8r8g8b8, r5g6b5, fast_composite_over_n_8888_0565_ca),
1834 PIXMAN_STD_FAST_PATH_CA (OVER, solid, a8b8g8r8, a8b8g8r8, fast_composite_over_n_8888_8888_ca),
1835 PIXMAN_STD_FAST_PATH_CA (OVER, solid, a8b8g8r8, x8b8g8r8, fast_composite_over_n_8888_8888_ca),
1836 PIXMAN_STD_FAST_PATH_CA (OVER, solid, a8b8g8r8, b5g6r5, fast_composite_over_n_8888_0565_ca),
1837 PIXMAN_STD_FAST_PATH (OVER, x8r8g8b8, a8, x8r8g8b8, fast_composite_over_x888_8_8888),
1838 PIXMAN_STD_FAST_PATH (OVER, x8r8g8b8, a8, a8r8g8b8, fast_composite_over_x888_8_8888),
1839 PIXMAN_STD_FAST_PATH (OVER, x8b8g8r8, a8, x8b8g8r8, fast_composite_over_x888_8_8888),
1840 PIXMAN_STD_FAST_PATH (OVER, x8b8g8r8, a8, a8b8g8r8, fast_composite_over_x888_8_8888),
1841 PIXMAN_STD_FAST_PATH (OVER, a8r8g8b8, null, a8r8g8b8, fast_composite_over_8888_8888),
1842 PIXMAN_STD_FAST_PATH (OVER, a8r8g8b8, null, x8r8g8b8, fast_composite_over_8888_8888),
1843 PIXMAN_STD_FAST_PATH (OVER, a8r8g8b8, null, r5g6b5, fast_composite_over_8888_0565),
1844 PIXMAN_STD_FAST_PATH (OVER, a8b8g8r8, null, a8b8g8r8, fast_composite_over_8888_8888),
1845 PIXMAN_STD_FAST_PATH (OVER, a8b8g8r8, null, x8b8g8r8, fast_composite_over_8888_8888),
1846 PIXMAN_STD_FAST_PATH (OVER, a8b8g8r8, null, b5g6r5, fast_composite_over_8888_0565),
1847 PIXMAN_STD_FAST_PATH (ADD, r5g6b5, null, r5g6b5, fast_composite_add_0565_0565),
1848 PIXMAN_STD_FAST_PATH (ADD, b5g6r5, null, b5g6r5, fast_composite_add_0565_0565),
1849 PIXMAN_STD_FAST_PATH (ADD, a8r8g8b8, null, a8r8g8b8, fast_composite_add_8888_8888),
1850 PIXMAN_STD_FAST_PATH (ADD, a8b8g8r8, null, a8b8g8r8, fast_composite_add_8888_8888),
1851 PIXMAN_STD_FAST_PATH (ADD, a8, null, a8, fast_composite_add_8_8),
1852 PIXMAN_STD_FAST_PATH (ADD, a1, null, a1, fast_composite_add_1_1),
1853 PIXMAN_STD_FAST_PATH_CA (ADD, solid, a8r8g8b8, a8r8g8b8, fast_composite_add_n_8888_8888_ca),
1854 PIXMAN_STD_FAST_PATH (ADD, solid, a8, a8, fast_composite_add_n_8_8),
1855 PIXMAN_STD_FAST_PATH (SRC, solid, null, a8r8g8b8, fast_composite_solid_fill),
1856 PIXMAN_STD_FAST_PATH (SRC, solid, null, x8r8g8b8, fast_composite_solid_fill),
1857 PIXMAN_STD_FAST_PATH (SRC, solid, null, a8b8g8r8, fast_composite_solid_fill),
1858 PIXMAN_STD_FAST_PATH (SRC, solid, null, x8b8g8r8, fast_composite_solid_fill),
1859 PIXMAN_STD_FAST_PATH (SRC, solid, null, a1, fast_composite_solid_fill),
1860 PIXMAN_STD_FAST_PATH (SRC, solid, null, a8, fast_composite_solid_fill),
1861 PIXMAN_STD_FAST_PATH (SRC, solid, null, r5g6b5, fast_composite_solid_fill),
1862 PIXMAN_STD_FAST_PATH (SRC, x8r8g8b8, null, a8r8g8b8, fast_composite_src_x888_8888),
1863 PIXMAN_STD_FAST_PATH (SRC, x8b8g8r8, null, a8b8g8r8, fast_composite_src_x888_8888),
1864 PIXMAN_STD_FAST_PATH (SRC, a8r8g8b8, null, x8r8g8b8, fast_composite_src_memcpy),
1865 PIXMAN_STD_FAST_PATH (SRC, a8r8g8b8, null, a8r8g8b8, fast_composite_src_memcpy),
1866 PIXMAN_STD_FAST_PATH (SRC, x8r8g8b8, null, x8r8g8b8, fast_composite_src_memcpy),
1867 PIXMAN_STD_FAST_PATH (SRC, a8b8g8r8, null, x8b8g8r8, fast_composite_src_memcpy),
1868 PIXMAN_STD_FAST_PATH (SRC, a8b8g8r8, null, a8b8g8r8, fast_composite_src_memcpy),
1869 PIXMAN_STD_FAST_PATH (SRC, x8b8g8r8, null, x8b8g8r8, fast_composite_src_memcpy),
1870 PIXMAN_STD_FAST_PATH (SRC, b8g8r8a8, null, b8g8r8x8, fast_composite_src_memcpy),
1871 PIXMAN_STD_FAST_PATH (SRC, b8g8r8a8, null, b8g8r8a8, fast_composite_src_memcpy),
1872 PIXMAN_STD_FAST_PATH (SRC, b8g8r8x8, null, b8g8r8x8, fast_composite_src_memcpy),
1873 PIXMAN_STD_FAST_PATH (SRC, r5g6b5, null, r5g6b5, fast_composite_src_memcpy),
1874 PIXMAN_STD_FAST_PATH (SRC, b5g6r5, null, b5g6r5, fast_composite_src_memcpy),
1875 PIXMAN_STD_FAST_PATH (SRC, r8g8b8, null, r8g8b8, fast_composite_src_memcpy),
1876 PIXMAN_STD_FAST_PATH (SRC, b8g8r8, null, b8g8r8, fast_composite_src_memcpy),
1877 PIXMAN_STD_FAST_PATH (SRC, x1r5g5b5, null, x1r5g5b5, fast_composite_src_memcpy),
1878 PIXMAN_STD_FAST_PATH (SRC, a1r5g5b5, null, x1r5g5b5, fast_composite_src_memcpy),
1879 PIXMAN_STD_FAST_PATH (SRC, a8, null, a8, fast_composite_src_memcpy),
1880 PIXMAN_STD_FAST_PATH (IN, a8, null, a8, fast_composite_in_8_8),
1881 PIXMAN_STD_FAST_PATH (IN, solid, a8, a8, fast_composite_in_n_8_8),
/* Nearest-neighbour scaled fast paths (SRC and OVER), dispatching to
 * the scanline functions generated by the scaled-nearest templates.
 */
1883 SIMPLE_NEAREST_FAST_PATH (SRC, x8r8g8b8, x8r8g8b8, 8888_8888),
1884 SIMPLE_NEAREST_FAST_PATH (SRC, a8r8g8b8, x8r8g8b8, 8888_8888),
1885 SIMPLE_NEAREST_FAST_PATH (SRC, x8b8g8r8, x8b8g8r8, 8888_8888),
1886 SIMPLE_NEAREST_FAST_PATH (SRC, a8b8g8r8, x8b8g8r8, 8888_8888),
1888 SIMPLE_NEAREST_FAST_PATH (SRC, a8r8g8b8, a8r8g8b8, 8888_8888),
1889 SIMPLE_NEAREST_FAST_PATH (SRC, a8b8g8r8, a8b8g8r8, 8888_8888),
1891 SIMPLE_NEAREST_FAST_PATH (SRC, x8r8g8b8, r5g6b5, 8888_565),
1892 SIMPLE_NEAREST_FAST_PATH (SRC, a8r8g8b8, r5g6b5, 8888_565),
1894 SIMPLE_NEAREST_FAST_PATH (SRC, r5g6b5, r5g6b5, 565_565),
1896 SIMPLE_NEAREST_FAST_PATH_COVER (SRC, x8r8g8b8, a8r8g8b8, x888_8888),
1897 SIMPLE_NEAREST_FAST_PATH_COVER (SRC, x8b8g8r8, a8b8g8r8, x888_8888),
1898 SIMPLE_NEAREST_FAST_PATH_PAD (SRC, x8r8g8b8, a8r8g8b8, x888_8888),
1899 SIMPLE_NEAREST_FAST_PATH_PAD (SRC, x8b8g8r8, a8b8g8r8, x888_8888),
1900 SIMPLE_NEAREST_FAST_PATH_NORMAL (SRC, x8r8g8b8, a8r8g8b8, x888_8888),
1901 SIMPLE_NEAREST_FAST_PATH_NORMAL (SRC, x8b8g8r8, a8b8g8r8, x888_8888),
1903 SIMPLE_NEAREST_FAST_PATH (OVER, a8r8g8b8, x8r8g8b8, 8888_8888),
1904 SIMPLE_NEAREST_FAST_PATH (OVER, a8b8g8r8, x8b8g8r8, 8888_8888),
1905 SIMPLE_NEAREST_FAST_PATH (OVER, a8r8g8b8, a8r8g8b8, 8888_8888),
1906 SIMPLE_NEAREST_FAST_PATH (OVER, a8b8g8r8, a8b8g8r8, 8888_8888),
1908 SIMPLE_NEAREST_FAST_PATH (OVER, a8r8g8b8, r5g6b5, 8888_565),
/* Fallback nearest-neighbour entries: these all dispatch to the
 * generic fast_composite_scaled_nearest routine rather than a
 * format-specialized scanline function.
 */
1910 #define NEAREST_FAST_PATH(op,s,d) \
1911 { PIXMAN_OP_ ## op, \
1912 PIXMAN_ ## s, SCALED_NEAREST_FLAGS, \
1914 PIXMAN_ ## d, FAST_PATH_STD_DEST_FLAGS, \
1915 fast_composite_scaled_nearest, \
1918 NEAREST_FAST_PATH (SRC, x8r8g8b8, x8r8g8b8),
1919 NEAREST_FAST_PATH (SRC, a8r8g8b8, x8r8g8b8),
1920 NEAREST_FAST_PATH (SRC, x8b8g8r8, x8b8g8r8),
1921 NEAREST_FAST_PATH (SRC, a8b8g8r8, x8b8g8r8),
1923 NEAREST_FAST_PATH (SRC, x8r8g8b8, a8r8g8b8),
1924 NEAREST_FAST_PATH (SRC, a8r8g8b8, a8r8g8b8),
1925 NEAREST_FAST_PATH (SRC, x8b8g8r8, a8b8g8r8),
1926 NEAREST_FAST_PATH (SRC, a8b8g8r8, a8b8g8r8),
1928 NEAREST_FAST_PATH (OVER, x8r8g8b8, x8r8g8b8),
1929 NEAREST_FAST_PATH (OVER, a8r8g8b8, x8r8g8b8),
1930 NEAREST_FAST_PATH (OVER, x8b8g8r8, x8b8g8r8),
1931 NEAREST_FAST_PATH (OVER, a8b8g8r8, x8b8g8r8),
1933 NEAREST_FAST_PATH (OVER, x8r8g8b8, a8r8g8b8),
1934 NEAREST_FAST_PATH (OVER, a8r8g8b8, a8r8g8b8),
1935 NEAREST_FAST_PATH (OVER, x8b8g8r8, a8b8g8r8),
1936 NEAREST_FAST_PATH (OVER, a8b8g8r8, a8b8g8r8),
/* Entries for the 90/270-degree rotation blitters generated by
 * FAST_SIMPLE_ROTATE above; each SIMPLE_ROTATE_FAST_PATH use emits one
 * entry per rotation angle for the given source/dest format pair.
 */
1938 #define SIMPLE_ROTATE_FLAGS(angle) \
1939 (FAST_PATH_ROTATE_ ## angle ## _TRANSFORM | \
1940 FAST_PATH_NEAREST_FILTER | \
1941 FAST_PATH_SAMPLES_COVER_CLIP_NEAREST | \
1942 FAST_PATH_STANDARD_FLAGS)
1944 #define SIMPLE_ROTATE_FAST_PATH(op,s,d,suffix) \
1945 { PIXMAN_OP_ ## op, \
1946 PIXMAN_ ## s, SIMPLE_ROTATE_FLAGS (90), \
1948 PIXMAN_ ## d, FAST_PATH_STD_DEST_FLAGS, \
1949 fast_composite_rotate_90_##suffix, \
1951 { PIXMAN_OP_ ## op, \
1952 PIXMAN_ ## s, SIMPLE_ROTATE_FLAGS (270), \
1954 PIXMAN_ ## d, FAST_PATH_STD_DEST_FLAGS, \
1955 fast_composite_rotate_270_##suffix, \
1958 SIMPLE_ROTATE_FAST_PATH (SRC, a8r8g8b8, a8r8g8b8, 8888),
1959 SIMPLE_ROTATE_FAST_PATH (SRC, a8r8g8b8, x8r8g8b8, 8888),
1960 SIMPLE_ROTATE_FAST_PATH (SRC, x8r8g8b8, x8r8g8b8, 8888),
1961 SIMPLE_ROTATE_FAST_PATH (SRC, r5g6b5, r5g6b5, 565),
1962 SIMPLE_ROTATE_FAST_PATH (SRC, a8, a8, 8),
1964 /* Simple repeat fast path entry.  Matches any operator and any
 * source/dest format pair, as long as the source is an untransformed
 * bits image with NORMAL (tiling) repeat. */
1967 (FAST_PATH_STANDARD_FLAGS | FAST_PATH_ID_TRANSFORM | FAST_PATH_BITS_IMAGE |
1968 FAST_PATH_NORMAL_REPEAT),
1970 PIXMAN_any, FAST_PATH_STD_DEST_FLAGS,
1971 fast_composite_tiled_repeat
/* Build a mask of 'n' consecutive set bits starting 'offs' bits into a
 * 32-bit word.  On big-endian targets a1 pixels are counted from the
 * most significant bit, so the run is placed from the top of the word.
 */
1977 #ifdef WORDS_BIGENDIAN
1978 #define A1_FILL_MASK(n, offs) (((1U << (n)) - 1) << (32 - (offs) - (n)))
1980 #define A1_FILL_MASK(n, offs) (((1U << (n)) - 1) << (offs))
/* Set (v != 0) or clear (v == 0) 'width' consecutive a1 pixels of one
 * scanline, starting 'offs' bits into the word at *dst.  Handles a
 * partial leading word, a run of whole 32-pixel words, and a partial
 * trailing word.  NOTE(review): some interior lines (branches on 'v',
 * loop headers) are not visible in this view.
 */
1983 static force_inline void
1984 pixman_fill1_line (uint32_t *dst, int offs, int width, int v)
1988 int leading_pixels = 32 - offs;
/* Whole run fits inside the first word */
1989 if (leading_pixels >= width)
1992 *dst |= A1_FILL_MASK (width, offs);
1994 *dst &= ~A1_FILL_MASK (width, offs);
/* Partial leading word */
2000 *dst++ |= A1_FILL_MASK (leading_pixels, offs);
2002 *dst++ &= ~A1_FILL_MASK (leading_pixels, offs);
2003 width -= leading_pixels;
/* Whole 32-pixel words (set case shown; clear store not visible) */
2009 *dst++ = 0xFFFFFFFF;
/* Partial trailing word */
2017 *dst |= A1_FILL_MASK (width, 0);
2019 *dst &= ~A1_FILL_MASK (width, 0);
/* Fill a rectangle of a 1 bpp image with 0 or 1 (depending on the
 * filler value), one scanline at a time via pixman_fill1_line.
 */
2024 pixman_fill1 (uint32_t *bits,
/* Word containing pixel (x, y); 'offs' (the bit offset x & 31) is
 * computed on a line not visible in this view.
 */
2032 uint32_t *dst = bits + y * stride + (x >> 5);
2039 pixman_fill1_line (dst, offs, width, 1);
2047 pixman_fill1_line (dst, offs, width, 0);
/* Fill a rectangle of an 8 bpp image with the low byte of 'filler',
 * row by row.  'stride' is in uint32_t units, hence the conversion to
 * a byte stride.
 */
2054 pixman_fill8 (uint32_t *bits,
2062 int byte_stride = stride * (int) sizeof (uint32_t);
2063 uint8_t *dst = (uint8_t *) bits;
2064 uint8_t v = filler & 0xff;
2067 dst = dst + y * byte_stride + x;
2071 for (i = 0; i < width; ++i)
2079 pixman_fill16 (uint32_t *bits,
2088 (stride * (int)sizeof (uint32_t)) / (int)sizeof (uint16_t);
2089 uint16_t *dst = (uint16_t *)bits;
2090 uint16_t v = filler & 0xffff;
2093 dst = dst + y * short_stride + x;
2097 for (i = 0; i < width; ++i)
2100 dst += short_stride;
/* Fill a rectangle of a 32 bpp image with 'filler', row by row. */
2105 pixman_fill32 (uint32_t *bits,
2115 bits = bits + y * stride + x;
2119 for (i = 0; i < width; ++i)
/* pixman_implementation_t fill hook: dispatches on the destination
 * bits-per-pixel to the width-specific fill helpers above.  (The
 * switch statement itself is not visible in this view.)
 */
2126 static pixman_bool_t
2127 fast_path_fill (pixman_implementation_t *imp,
2140 pixman_fill1 (bits, stride, x, y, width, height, filler);
2144 pixman_fill8 (bits, stride, x, y, width, height, filler);
2148 pixman_fill16 (bits, stride, x, y, width, height, filler);
2152 pixman_fill32 (bits, stride, x, y, width, height, filler);
2162 /*****************************************************************************/
/* Source iterator scanline fetch: expand one r5g6b5 scanline into
 * a8r8g8b8 in iter->buffer, forcing alpha to 0xff.  Reads one leading
 * pixel to reach 4-byte alignment, then unpacks two 565 pixels per
 * 32-bit load.
 */
2165 fast_fetch_r5g6b5 (pixman_iter_t *iter, const uint32_t *mask)
2167 int32_t w = iter->width;
2168 uint32_t *dst = iter->buffer;
2169 const uint16_t *src = (const uint16_t *)iter->bits;
2171 iter->bits += iter->stride;
2173 /* Align the source buffer at 4 bytes boundary */
2174 if (w > 0 && ((uintptr_t)src & 3))
2176 *dst++ = convert_0565_to_8888 (*src++);
2179 /* Process two pixels per iteration */
2180 while ((w -= 2) >= 0)
2182 uint32_t sr, sb, sg, t0, t1;
2183 uint32_t s = *(const uint32_t *)src;
/* Extract the red/blue/green fields of both packed pixels at once */
2185 sr = (s >> 8) & 0x00F800F8;
2186 sb = (s << 3) & 0x00F800F8;
2187 sg = (s >> 3) & 0x00FC00FC;
/* Recombine each pixel into 8888 form with alpha forced to 0xff */
2191 t0 = ((sr << 16) & 0x00FF0000) | ((sg << 8) & 0x0000FF00) |
2192 (sb & 0xFF) | 0xFF000000;
2193 t1 = (sr & 0x00FF0000) | ((sg >> 8) & 0x0000FF00) |
2194 (sb >> 16) | 0xFF000000;
2195 #ifdef WORDS_BIGENDIAN
2205 *dst = convert_0565_to_8888 (*src);
2208 return iter->buffer;
/* Destination iterator "fetch" that does not read the destination:
 * it only advances the scanline pointer and returns the (unread)
 * buffer.  Paired below with ITER_IGNORE_RGB | ITER_IGNORE_ALPHA,
 * i.e. when the existing dest contents do not matter.
 */
2212 fast_dest_fetch_noop (pixman_iter_t *iter, const uint32_t *mask)
2214 iter->bits += iter->stride;
2215 return iter->buffer;
2218 /* Helper function for a workaround, which tries to ensure that 0x1F001F
2219 * constant is always allocated in a register on RISC architectures.
 *
 * Converts one a8r8g8b8 pixel to r5g6b5; the remainder of the packing
 * arithmetic is not visible in this view.
 */
2221 static force_inline uint32_t
2222 convert_8888_to_0565_workaround (uint32_t s, uint32_t x1F001F)
2225 a = (s >> 3) & x1F001F;
/* Destination iterator write-back: convert the a8r8g8b8 scanline in
 * iter->buffer back to r5g6b5 and store it to the image.  The loop is
 * unrolled four pixels at a time, with scalar cleanup for the
 * remaining 0-3 pixels.
 */
2233 fast_write_back_r5g6b5 (pixman_iter_t *iter)
2235 int32_t w = iter->width;
/* iter->bits was already advanced by the fetch, so step back a row */
2236 uint16_t *dst = (uint16_t *)(iter->bits - iter->stride);
2237 const uint32_t *src = iter->buffer;
2238 /* Workaround to ensure that x1F001F variable is allocated in a register */
2239 static volatile uint32_t volatile_x1F001F = 0x1F001F;
2240 uint32_t x1F001F = volatile_x1F001F;
2242 while ((w -= 4) >= 0)
2244 uint32_t s1 = *src++;
2245 uint32_t s2 = *src++;
2246 uint32_t s3 = *src++;
2247 uint32_t s4 = *src++;
2248 *dst++ = convert_8888_to_0565_workaround (s1, x1F001F);
2249 *dst++ = convert_8888_to_0565_workaround (s2, x1F001F);
2250 *dst++ = convert_8888_to_0565_workaround (s3, x1F001F);
2251 *dst++ = convert_8888_to_0565_workaround (s4, x1F001F);
/* Cleanup: a possible pair of pixels, then a possible single pixel */
2255 *dst++ = convert_8888_to_0565_workaround (*src++, x1F001F);
2256 *dst++ = convert_8888_to_0565_workaround (*src++, x1F001F);
2260 *dst = convert_8888_to_0565_workaround (*src, x1F001F);
/* Produce one horizontally-interpolated line for the bilinear cover
 * iterator.  For each of 'n' output positions, blend the two adjacent
 * pixels of source row 'y' using the fractional x weight, and store a
 * widened intermediate (alpha/green and red/blue channel pairs kept in
 * separate halves) in line->buffer for the later vertical pass.
 */
2279 fetch_horizontal (bits_image_t *image, line_t *line,
2280 int y, pixman_fixed_t x, pixman_fixed_t ux, int n)
2282 uint32_t *bits = image->bits + y * image->rowstride;
2285 for (i = 0; i < n; ++i)
2287 int x0 = pixman_fixed_to_int (x);
2291 uint32_t left = *(bits + x0);
2292 uint32_t right = *(bits + x1);
/* Scale the bilinear weight up to 8 fractional bits */
2294 dist_x = pixman_fixed_to_bilinear_weight (x);
2295 dist_x <<= (8 - BILINEAR_INTERPOLATION_BITS);
2297 #if SIZEOF_LONG <= 4
/* 32-bit path: interpolate alpha/green and red/blue separately and
 * store them as two uint32_t halves of the 64-bit buffer slot.
 */
2299 uint32_t lag, rag, ag;
2300 uint32_t lrb, rrb, rb;
2302 lag = (left & 0xff00ff00) >> 8;
2303 rag = (right & 0xff00ff00) >> 8;
2304 ag = (lag << 8) + dist_x * (rag - lag);
2306 lrb = (left & 0x00ff00ff);
2307 rrb = (right & 0x00ff00ff);
2308 rb = (lrb << 8) + dist_x * (rrb - lrb);
2310 *((uint32_t *)(line->buffer + i)) = ag;
2311 *((uint32_t *)(line->buffer + i) + 1) = rb;
/* 64-bit path: pack all four channels into one uint64_t and
 * interpolate them in a single expression.
 */
2315 uint64_t lagrb, ragrb;
2319 lag = (left & 0xff00ff00);
2320 lrb = (left & 0x00ff00ff);
2321 rag = (right & 0xff00ff00);
2322 rrb = (right & 0x00ff00ff);
2323 lagrb = (((uint64_t)lag) << 24) | lrb;
2324 ragrb = (((uint64_t)rag) << 24) | rrb;
2326 line->buffer[i] = (lagrb << 8) + dist_x * (ragrb - lagrb);
/* Scanline fetch for the bilinear COVER iterator.  The two source rows
 * bracketing info->y are kept in a two-entry line cache; rows missing
 * from the cache are (re)filled via fetch_horizontal, then blended
 * vertically with the fractional y weight into iter->buffer.
 */
2337 fast_fetch_bilinear_cover (pixman_iter_t *iter, const uint32_t *mask)
2339 pixman_fixed_t fx, ux;
2340 bilinear_info_t *info = iter->data;
2341 line_t *line0, *line1;
2347 ux = iter->image->common.transform->matrix[0][0];
2349 y0 = pixman_fixed_to_int (info->y);
/* Vertical weight, scaled up to 8 fractional bits */
2351 dist_y = pixman_fixed_to_bilinear_weight (info->y);
2352 dist_y <<= (8 - BILINEAR_INTERPOLATION_BITS);
/* The cache holds rows y0 and y1 in slots selected by parity */
2354 line0 = &info->lines[y0 & 0x01];
2355 line1 = &info->lines[y1 & 0x01];
2360 &iter->image->bits, line0, y0, fx, ux, iter->width);
2366 &iter->image->bits, line1, y1, fx, ux, iter->width);
2369 for (i = 0; i < iter->width; ++i)
2371 #if SIZEOF_LONG <= 4
/* 32-bit path: channels are stored as two uint32_t halves */
2372 uint32_t ta, tr, tg, tb;
2373 uint32_t ba, br, bg, bb;
2376 uint32_t a, r, g, b;
2378 tag = *((uint32_t *)(line0->buffer + i));
2379 trb = *((uint32_t *)(line0->buffer + i) + 1);
2380 bag = *((uint32_t *)(line1->buffer + i));
2381 brb = *((uint32_t *)(line1->buffer + i) + 1);
2385 a = (ta << 8) + dist_y * (ba - ta);
2389 r = (tr << 8) + dist_y * (br - tr);
2393 g = (tg << 8) + dist_y * (bg - tg);
2397 b = (tb << 8) + dist_y * (bb - tb);
2399 a = (a << 8) & 0xff000000;
2400 r = (r << 0) & 0x00ff0000;
2401 g = (g >> 8) & 0x0000ff00;
2402 b = (b >> 16) & 0x000000ff;
/* 64-bit path: interpolate a/r and g/b channel pairs packed in
 * uint64_t words.
 */
2404 uint64_t top = line0->buffer[i];
2405 uint64_t bot = line1->buffer[i];
2406 uint64_t tar = (top & 0xffff0000ffff0000ULL) >> 16;
2407 uint64_t bar = (bot & 0xffff0000ffff0000ULL) >> 16;
2408 uint64_t tgb = (top & 0x0000ffff0000ffffULL);
2409 uint64_t bgb = (bot & 0x0000ffff0000ffffULL);
2411 uint32_t a, r, g, b;
2413 ar = (tar << 8) + dist_y * (bar - tar);
2414 gb = (tgb << 8) + dist_y * (bgb - tgb);
2416 a = ((ar >> 24) & 0xff000000);
2417 r = ((ar >> 0) & 0x00ff0000);
2418 g = ((gb >> 40) & 0x0000ff00);
2419 b = ((gb >> 16) & 0x000000ff);
2422 iter->buffer[i] = a | r | g | b;
/* Advance to the next destination scanline in source space */
2425 info->y += iter->image->common.transform->matrix[1][1];
2427 return iter->buffer;
/* Finalizer for the bilinear cover iterator.
 * NOTE(review): body not visible in this view; presumably it frees the
 * iter->data allocated in fast_bilinear_cover_iter_init — confirm
 * against the full source.
 */
2431 bilinear_cover_iter_fini (pixman_iter_t *iter)
/* Initialize the bilinear COVER iterator: transform the first dest
 * pixel centre into source space, allocate a bilinear_info_t holding
 * two cached line buffers of 'width' slots each, and install the
 * scanline and fini callbacks.  On a bad matrix or allocation failure
 * it falls back to a noop scanline fetch.
 */
2437 fast_bilinear_cover_iter_init (pixman_iter_t *iter, const pixman_iter_info_t *iter_info)
2439 int width = iter->width;
2440 bilinear_info_t *info;
2443 /* Reference point is the center of the pixel */
2444 v.vector[0] = pixman_int_to_fixed (iter->x) + pixman_fixed_1 / 2;
2445 v.vector[1] = pixman_int_to_fixed (iter->y) + pixman_fixed_1 / 2;
2446 v.vector[2] = pixman_fixed_1;
2448 if (!pixman_transform_point_3d (iter->image->common.transform, &v))
/* One struct plus 2 * width line-buffer slots (one slot is already
 * part of the struct, hence 2 * width - 1 extra).
 */
2451 info = malloc (sizeof (*info) + (2 * width - 1) * sizeof (uint64_t));
2455 info->x = v.vector[0] - pixman_fixed_1 / 2;
2456 info->y = v.vector[1] - pixman_fixed_1 / 2;
2458 /* It is safe to set the y coordinates to -1 initially
2459 * because COVER_CLIP_BILINEAR ensures that we will only
2460 * be asked to fetch lines in the [0, height) interval
 */
2462 info->lines[0].y = -1;
2463 info->lines[0].buffer = &(info->data[0]);
2464 info->lines[1].y = -1;
2465 info->lines[1].buffer = &(info->data[width]);
2467 iter->get_scanline = fast_fetch_bilinear_cover;
2468 iter->fini = bilinear_cover_iter_fini;
2474 /* Something went wrong, either a bad matrix or OOM; in such cases,
2475 * we don't guarantee any particular rendering.
 */
2478 FUNC, "Allocation failure or bad matrix, skipping rendering\n");
2480 iter->get_scanline = _pixman_iter_get_scanline_noop;
/* Scanline fetch for bilinear filtering with NONE repeat on 8888
 * images.  The scanline is processed in phases: fully left of the
 * image (zero fill), partially inside on the left, fully inside,
 * partially inside on the right, fully right of the image (zero fill).
 * Source rows that fall outside the image are replaced by a small
 * all-zero dummy row so the inner loops need no bounds checks.
 */
2485 bits_image_fetch_bilinear_no_repeat_8888 (pixman_iter_t *iter,
2486 const uint32_t *mask)
2489 pixman_image_t * ima = iter->image;
2490 int offset = iter->x;
2491 int line = iter->y++;
2492 int width = iter->width;
2493 uint32_t * buffer = iter->buffer;
2495 bits_image_t *bits = &ima->bits;
2496 pixman_fixed_t x_top, x_bottom, x;
2497 pixman_fixed_t ux_top, ux_bottom, ux;
2499 uint32_t top_mask, bottom_mask;
2501 uint32_t *bottom_row;
2503 uint32_t zero[2] = { 0, 0 };
2510 /* reference point is the center of the pixel */
2511 v.vector[0] = pixman_int_to_fixed (offset) + pixman_fixed_1 / 2;
2512 v.vector[1] = pixman_int_to_fixed (line) + pixman_fixed_1 / 2;
2513 v.vector[2] = pixman_fixed_1;
2515 if (!pixman_transform_point_3d (bits->common.transform, &v))
2516 return iter->buffer;
2518 ux = ux_top = ux_bottom = bits->common.transform->matrix[0][0];
2519 x = x_top = x_bottom = v.vector[0] - pixman_fixed_1/2;
2521 y = v.vector[1] - pixman_fixed_1/2;
2522 disty = pixman_fixed_to_bilinear_weight (y);
2524 /* Load the pointers to the first and second lines from the source
2525 * image that bilinear code must read.
2527 * The main trick in this code is about the check if any line are
2528 * outside of the image;
2530 * When I realize that a line (any one) is outside, I change
2531 * the pointer to a dummy area with zeros. Once I change this, I
2532 * must be sure the pointer will not change, so I set the
2533 * variables to each pointer increments inside the loop.
 */
2535 y1 = pixman_fixed_to_int (y);
2538 if (y1 < 0 || y1 >= bits->height)
2546 top_row = bits->bits + y1 * bits->rowstride;
2551 if (y2 < 0 || y2 >= bits->height)
2559 bottom_row = bits->bits + y2 * bits->rowstride;
2564 /* Instead of checking whether the operation uses the mask in
2565 * each loop iteration, verify this only once and prepare the
2566 * variables to make the code smaller inside the loop.
 */
2575 /* If have a mask, prepare the variables to check it */
2579 /* If both are zero, then the whole thing is zero */
2580 if (top_row == zero && bottom_row == zero)
2582 memset (buffer, 0, width * sizeof (uint32_t));
2583 return iter->buffer;
2585 else if (bits->format == PIXMAN_x8r8g8b8)
/* x8r8g8b8: force alpha to 0xff on whichever row(s) come from the
 * actual image (the dummy zero row stays fully transparent).
 */
2587 if (top_row == zero)
2590 bottom_mask = 0xff000000;
2592 else if (bottom_row == zero)
2594 top_mask = 0xff000000;
2599 top_mask = 0xff000000;
2600 bottom_mask = 0xff000000;
2609 end = buffer + width;
2611 /* Zero fill to the left of the image */
2612 while (buffer < end && x < pixman_fixed_minus_1)
2617 x_bottom += ux_bottom;
/* Left edge: only the right-hand source pixel is inside the image */
2623 while (buffer < end && x < 0)
2628 tr = top_row[pixman_fixed_to_int (x_top) + 1] | top_mask;
2629 br = bottom_row[pixman_fixed_to_int (x_bottom) + 1] | bottom_mask;
2631 distx = pixman_fixed_to_bilinear_weight (x);
2633 *buffer++ = bilinear_interpolation (0, tr, 0, br, distx, disty);
2637 x_bottom += ux_bottom;
/* Main loop: all four source pixels are inside the image */
2642 w = pixman_int_to_fixed (bits->width - 1);
2644 while (buffer < end && x < w)
2648 uint32_t tl, tr, bl, br;
2651 tl = top_row [pixman_fixed_to_int (x_top)] | top_mask;
2652 tr = top_row [pixman_fixed_to_int (x_top) + 1] | top_mask;
2653 bl = bottom_row [pixman_fixed_to_int (x_bottom)] | bottom_mask;
2654 br = bottom_row [pixman_fixed_to_int (x_bottom) + 1] | bottom_mask;
2656 distx = pixman_fixed_to_bilinear_weight (x);
2658 *buffer = bilinear_interpolation (tl, tr, bl, br, distx, disty);
2664 x_bottom += ux_bottom;
/* Right edge: only the left-hand source pixel is inside the image */
2669 w = pixman_int_to_fixed (bits->width);
2670 while (buffer < end && x < w)
2677 tl = top_row [pixman_fixed_to_int (x_top)] | top_mask;
2678 bl = bottom_row [pixman_fixed_to_int (x_bottom)] | bottom_mask;
2680 distx = pixman_fixed_to_bilinear_weight (x);
2682 *buffer = bilinear_interpolation (tl, 0, bl, 0, distx, disty);
2688 x_bottom += ux_bottom;
2692 /* Zero fill to the right of the image */
2693 while (buffer < end)
2696 return iter->buffer;
/* Converts the pixel at index 'x' of scanline 'row' to a8r8g8b8. */
2699 typedef uint32_t (* convert_pixel_t) (const uint8_t *row, int x);
/* Generic affine separable-convolution sampler.  For each output
 * pixel, walks a cwidth x cheight filter matrix (selected by the x/y
 * sampling phase) over the source image, accumulating weighted
 * a/r/g/b sums in 16.16 fixed point, then rounds, clamps each channel
 * to [0, 0xff] and packs an a8r8g8b8 result into 'buffer'.
 * 'convert_pixel', 'format' and 'repeat_mode' are compile-time
 * constants supplied by MAKE_SEPARABLE_CONVOLUTION_FETCHER, so each
 * instantiation is fully specialized.
 */
2701 static force_inline void
2702 bits_image_fetch_separable_convolution_affine (pixman_image_t * image,
2707 const uint32_t * mask,
2709 convert_pixel_t convert_pixel,
2710 pixman_format_code_t format,
2711 pixman_repeat_t repeat_mode)
2713 bits_image_t *bits = &image->bits;
2714 pixman_fixed_t *params = image->common.filter_params;
2715 int cwidth = pixman_fixed_to_int (params[0]);
2716 int cheight = pixman_fixed_to_int (params[1]);
/* Offset from the sample point to the top-left of the filter matrix */
2717 int x_off = ((cwidth << 16) - pixman_fixed_1) >> 1;
2718 int y_off = ((cheight << 16) - pixman_fixed_1) >> 1;
2719 int x_phase_bits = pixman_fixed_to_int (params[2]);
2720 int y_phase_bits = pixman_fixed_to_int (params[3]);
2721 int x_phase_shift = 16 - x_phase_bits;
2722 int y_phase_shift = 16 - y_phase_bits;
2723 pixman_fixed_t vx, vy;
2724 pixman_fixed_t ux, uy;
2728 /* reference point is the center of the pixel */
2729 v.vector[0] = pixman_int_to_fixed (offset) + pixman_fixed_1 / 2;
2730 v.vector[1] = pixman_int_to_fixed (line) + pixman_fixed_1 / 2;
2731 v.vector[2] = pixman_fixed_1;
2733 if (!pixman_transform_point_3d (image->common.transform, &v))
2736 ux = image->common.transform->matrix[0][0];
2737 uy = image->common.transform->matrix[1][0];
2742 for (k = 0; k < width; ++k)
2744 pixman_fixed_t *y_params;
2745 int satot, srtot, sgtot, sbtot;
2746 pixman_fixed_t x, y;
2747 int32_t x1, x2, y1, y2;
/* Skip pixels the component mask zeroes out entirely */
2751 if (mask && !mask[k])
2754 /* Round x and y to the middle of the closest phase before continuing. This
2755 * ensures that the convolution matrix is aligned right, since it was
2756 * positioned relative to a particular phase (and not relative to whatever
2757 * exact fraction we happen to get here).
 */
2759 x = ((vx >> x_phase_shift) << x_phase_shift) + ((1 << x_phase_shift) >> 1);
2760 y = ((vy >> y_phase_shift) << y_phase_shift) + ((1 << y_phase_shift) >> 1);
2762 px = (x & 0xffff) >> x_phase_shift;
2763 py = (y & 0xffff) >> y_phase_shift;
2765 x1 = pixman_fixed_to_int (x - pixman_fixed_e - x_off);
2766 y1 = pixman_fixed_to_int (y - pixman_fixed_e - y_off);
2770 satot = srtot = sgtot = sbtot = 0;
/* Filter coefficients: the y vector for phase 'py' follows the
 * (1 << x_phase_bits) x vectors in the params array.
 */
2772 y_params = params + 4 + (1 << x_phase_bits) * cwidth + py * cheight;
2774 for (i = y1; i < y2; ++i)
2776 pixman_fixed_t fy = *y_params++;
2780 pixman_fixed_t *x_params = params + 4 + px * cwidth;
2782 for (j = x1; j < x2; ++j)
2784 pixman_fixed_t fx = *x_params++;
/* Note: this 'mask' is a local alpha-forcing mask that shadows
 * the mask parameter (for formats without an alpha channel).
 */
2791 uint32_t pixel, mask;
2794 mask = PIXMAN_FORMAT_A (format)? 0 : 0xff000000;
2796 if (repeat_mode != PIXMAN_REPEAT_NONE)
2798 repeat (repeat_mode, &rx, bits->width);
2799 repeat (repeat_mode, &ry, bits->height);
2801 row = (uint8_t *)bits->bits + bits->rowstride * 4 * ry;
2802 pixel = convert_pixel (row, rx) | mask;
/* NONE repeat: out-of-bounds samples contribute nothing */
2806 if (rx < 0 || ry < 0 || rx >= bits->width || ry >= bits->height)
2812 row = (uint8_t *)bits->bits + bits->rowstride * 4 * ry;
2813 pixel = convert_pixel (row, rx) | mask;
/* Combined weight of this tap, rounded to 16.16 */
2817 f = ((pixman_fixed_32_32_t)fx * fy + 0x8000) >> 16;
2818 srtot += (int)RED_8 (pixel) * f;
2819 sgtot += (int)GREEN_8 (pixel) * f;
2820 sbtot += (int)BLUE_8 (pixel) * f;
2821 satot += (int)ALPHA_8 (pixel) * f;
/* Round the 16.16 accumulators back to 8-bit channels */
2827 satot = (satot + 0x8000) >> 16;
2828 srtot = (srtot + 0x8000) >> 16;
2829 sgtot = (sgtot + 0x8000) >> 16;
2830 sbtot = (sbtot + 0x8000) >> 16;
2832 satot = CLIP (satot, 0, 0xff);
2833 srtot = CLIP (srtot, 0, 0xff);
2834 sgtot = CLIP (sgtot, 0, 0xff);
2835 sbtot = CLIP (sbtot, 0, 0xff);
2837 buffer[k] = (satot << 24) | (srtot << 16) | (sgtot << 8) | (sbtot << 0);
/* All-zero dummy pixel data.  NOTE(review): presumably substituted
 * for out-of-bounds source rows in the affine fetchers below; the
 * lines that reference it are not visible in this view.
 */
2845 static const uint8_t zero[8] = { 0, 0, 0, 0, 0, 0, 0, 0 };
/* Generic affine bilinear sampler.  Transforms each destination pixel
 * centre into source space, fetches the surrounding 2x2 pixel
 * neighbourhood — applying the repeat mode, or for NONE repeat
 * clamping at the image edges and substituting transparent pixels
 * outside — and blends with the fractional x/y weights into 'buffer'.
 * 'convert_pixel', 'format' and 'repeat_mode' are compile-time
 * constants supplied by MAKE_BILINEAR_FETCHER.
 */
2847 static force_inline void
2848 bits_image_fetch_bilinear_affine (pixman_image_t * image,
2853 const uint32_t * mask,
2855 convert_pixel_t convert_pixel,
2856 pixman_format_code_t format,
2857 pixman_repeat_t repeat_mode)
2859 pixman_fixed_t x, y;
2860 pixman_fixed_t ux, uy;
2862 bits_image_t *bits = &image->bits;
2865 /* reference point is the center of the pixel */
2866 v.vector[0] = pixman_int_to_fixed (offset) + pixman_fixed_1 / 2;
2867 v.vector[1] = pixman_int_to_fixed (line) + pixman_fixed_1 / 2;
2868 v.vector[2] = pixman_fixed_1;
2870 if (!pixman_transform_point_3d (image->common.transform, &v))
2873 ux = image->common.transform->matrix[0][0];
2874 uy = image->common.transform->matrix[1][0];
2879 for (i = 0; i < width; ++i)
2882 uint32_t tl, tr, bl, br;
2883 int32_t distx, disty;
/* Note: this local 'width' shadows the width parameter */
2884 int width = image->bits.width;
2885 int height = image->bits.height;
2886 const uint8_t *row1;
2887 const uint8_t *row2;
2889 if (mask && !mask[i])
/* Top-left sample position of the 2x2 neighbourhood */
2892 x1 = x - pixman_fixed_1 / 2;
2893 y1 = y - pixman_fixed_1 / 2;
2895 distx = pixman_fixed_to_bilinear_weight (x1);
2896 disty = pixman_fixed_to_bilinear_weight (y1);
2898 y1 = pixman_fixed_to_int (y1);
2900 x1 = pixman_fixed_to_int (x1);
2903 if (repeat_mode != PIXMAN_REPEAT_NONE)
2907 mask = PIXMAN_FORMAT_A (format)? 0 : 0xff000000;
2909 repeat (repeat_mode, &x1, width);
2910 repeat (repeat_mode, &y1, height);
2911 repeat (repeat_mode, &x2, width);
2912 repeat (repeat_mode, &y2, height);
2914 row1 = (uint8_t *)bits->bits + bits->rowstride * 4 * y1;
2915 row2 = (uint8_t *)bits->bits + bits->rowstride * 4 * y2;
2917 tl = convert_pixel (row1, x1) | mask;
2918 tr = convert_pixel (row1, x2) | mask;
2919 bl = convert_pixel (row2, x1) | mask;
2920 br = convert_pixel (row2, x2) | mask;
/* NONE repeat: handle the image edges explicitly */
2924 uint32_t mask1, mask2;
2927 /* Note: PIXMAN_FORMAT_BPP() returns an unsigned value,
2928 * which means if you use it in expressions, those
2929 * expressions become unsigned themselves. Since
2930 * the variables below can be negative in some cases,
2931 * that will lead to crashes on 64 bit architectures.
2933 * So this line makes sure bpp is signed
 */
2935 bpp = PIXMAN_FORMAT_BPP (format);
2937 if (x1 >= width || x2 < 0 || y1 >= height || y2 < 0)
2950 row1 = (uint8_t *)bits->bits + bits->rowstride * 4 * y1;
2951 row1 += bpp / 8 * x1;
2953 mask1 = PIXMAN_FORMAT_A (format)? 0 : 0xff000000;
2956 if (y1 == height - 1)
2963 row2 = (uint8_t *)bits->bits + bits->rowstride * 4 * y2;
2964 row2 += bpp / 8 * x1;
2966 mask2 = PIXMAN_FORMAT_A (format)? 0 : 0xff000000;
2976 tl = convert_pixel (row1, 0) | mask1;
2977 bl = convert_pixel (row2, 0) | mask2;
2980 if (x1 == width - 1)
2987 tr = convert_pixel (row1, 1) | mask1;
2988 br = convert_pixel (row2, 1) | mask2;
2992 buffer[i] = bilinear_interpolation (
2993 tl, tr, bl, br, distx, disty);
/* Generic affine nearest-neighbour sampler.  Transforms each
 * destination pixel centre into source space, applies the repeat mode
 * (NONE repeat yields transparent outside the image — handled in
 * lines not fully visible here), and converts the selected source
 * pixel to a8r8g8b8 into 'buffer'.  'convert_pixel', 'format' and
 * 'repeat_mode' are compile-time constants from MAKE_NEAREST_FETCHER.
 */
3001 static force_inline void
3002 bits_image_fetch_nearest_affine (pixman_image_t * image,
3007 const uint32_t * mask,
3009 convert_pixel_t convert_pixel,
3010 pixman_format_code_t format,
3011 pixman_repeat_t repeat_mode)
3013 pixman_fixed_t x, y;
3014 pixman_fixed_t ux, uy;
3016 bits_image_t *bits = &image->bits;
3019 /* reference point is the center of the pixel */
3020 v.vector[0] = pixman_int_to_fixed (offset) + pixman_fixed_1 / 2;
3021 v.vector[1] = pixman_int_to_fixed (line) + pixman_fixed_1 / 2;
3022 v.vector[2] = pixman_fixed_1;
3024 if (!pixman_transform_point_3d (image->common.transform, &v))
3027 ux = image->common.transform->matrix[0][0];
3028 uy = image->common.transform->matrix[1][0];
3033 for (i = 0; i < width; ++i)
3035 int width, height, x0, y0;
3038 if (mask && !mask[i])
3041 width = image->bits.width;
3042 height = image->bits.height;
/* Round-to-nearest: subtract epsilon so exact half-way points
 * round consistently.
 */
3043 x0 = pixman_fixed_to_int (x - pixman_fixed_e);
3044 y0 = pixman_fixed_to_int (y - pixman_fixed_e);
3046 if (repeat_mode == PIXMAN_REPEAT_NONE &&
3047 (y0 < 0 || y0 >= height || x0 < 0 || x0 >= width))
/* Force alpha to 0xff for formats with no alpha channel */
3053 uint32_t mask = PIXMAN_FORMAT_A (format)? 0 : 0xff000000;
3055 if (repeat_mode != PIXMAN_REPEAT_NONE)
3057 repeat (repeat_mode, &x0, width);
3058 repeat (repeat_mode, &y0, height);
3061 row = (uint8_t *)bits->bits + bits->rowstride * 4 * y0;
3063 buffer[i] = convert_pixel (row, x0) | mask;
3072 static force_inline uint32_t
3073 convert_a8r8g8b8 (const uint8_t *row, int x)
3075 return *(((uint32_t *)row) + x);
3078 static force_inline uint32_t
3079 convert_x8r8g8b8 (const uint8_t *row, int x)
3081 return *(((uint32_t *)row) + x);
3084 static force_inline uint32_t
3085 convert_a8 (const uint8_t *row, int x)
3087 return *(row + x) << 24;
3090 static force_inline uint32_t
3091 convert_r5g6b5 (const uint8_t *row, int x)
3093 return convert_0565_to_0888 (*((uint16_t *)row + x));
/* Macros that generate pixman_iter_t scanline callbacks wrapping the
 * three generic affine samplers above.  Each wrapper binds a pixel
 * converter, a pixel format and a repeat mode as compile-time
 * constants, so force_inline expansion produces a fully specialized
 * fetcher per (format, repeat) combination.
 */
3096 #define MAKE_SEPARABLE_CONVOLUTION_FETCHER(name, format, repeat_mode) \
3098 bits_image_fetch_separable_convolution_affine_ ## name (pixman_iter_t *iter, \
3099 const uint32_t * mask) \
3101 bits_image_fetch_separable_convolution_affine ( \
3103 iter->x, iter->y++, \
3105 iter->buffer, mask, \
3106 convert_ ## format, \
3107 PIXMAN_ ## format, \
3110 return iter->buffer; \
3113 #define MAKE_BILINEAR_FETCHER(name, format, repeat_mode) \
3115 bits_image_fetch_bilinear_affine_ ## name (pixman_iter_t *iter, \
3116 const uint32_t * mask) \
3118 bits_image_fetch_bilinear_affine (iter->image, \
3119 iter->x, iter->y++, \
3121 iter->buffer, mask, \
3122 convert_ ## format, \
3123 PIXMAN_ ## format, \
3125 return iter->buffer; \
3128 #define MAKE_NEAREST_FETCHER(name, format, repeat_mode) \
3130 bits_image_fetch_nearest_affine_ ## name (pixman_iter_t *iter, \
3131 const uint32_t * mask) \
3133 bits_image_fetch_nearest_affine (iter->image, \
3134 iter->x, iter->y++, \
3136 iter->buffer, mask, \
3137 convert_ ## format, \
3138 PIXMAN_ ## format, \
3140 return iter->buffer; \
3143 #define MAKE_FETCHERS(name, format, repeat_mode) \
3144 MAKE_NEAREST_FETCHER (name, format, repeat_mode) \
3145 MAKE_BILINEAR_FETCHER (name, format, repeat_mode) \
3146 MAKE_SEPARABLE_CONVOLUTION_FETCHER (name, format, repeat_mode)
/* Instantiate nearest/bilinear/separable-convolution fetchers for
 * every supported (format, repeat mode) combination.
 */
3148 MAKE_FETCHERS (pad_a8r8g8b8, a8r8g8b8, PIXMAN_REPEAT_PAD)
3149 MAKE_FETCHERS (none_a8r8g8b8, a8r8g8b8, PIXMAN_REPEAT_NONE)
3150 MAKE_FETCHERS (reflect_a8r8g8b8, a8r8g8b8, PIXMAN_REPEAT_REFLECT)
3151 MAKE_FETCHERS (normal_a8r8g8b8, a8r8g8b8, PIXMAN_REPEAT_NORMAL)
3152 MAKE_FETCHERS (pad_x8r8g8b8, x8r8g8b8, PIXMAN_REPEAT_PAD)
3153 MAKE_FETCHERS (none_x8r8g8b8, x8r8g8b8, PIXMAN_REPEAT_NONE)
3154 MAKE_FETCHERS (reflect_x8r8g8b8, x8r8g8b8, PIXMAN_REPEAT_REFLECT)
3155 MAKE_FETCHERS (normal_x8r8g8b8, x8r8g8b8, PIXMAN_REPEAT_NORMAL)
3156 MAKE_FETCHERS (pad_a8, a8, PIXMAN_REPEAT_PAD)
3157 MAKE_FETCHERS (none_a8, a8, PIXMAN_REPEAT_NONE)
3158 MAKE_FETCHERS (reflect_a8, a8, PIXMAN_REPEAT_REFLECT)
3159 MAKE_FETCHERS (normal_a8, a8, PIXMAN_REPEAT_NORMAL)
3160 MAKE_FETCHERS (pad_r5g6b5, r5g6b5, PIXMAN_REPEAT_PAD)
3161 MAKE_FETCHERS (none_r5g6b5, r5g6b5, PIXMAN_REPEAT_NONE)
3162 MAKE_FETCHERS (reflect_r5g6b5, r5g6b5, PIXMAN_REPEAT_REFLECT)
3163 MAKE_FETCHERS (normal_r5g6b5, r5g6b5, PIXMAN_REPEAT_NORMAL)
/* Flags under which a bits image scanline can be fetched directly
 * (untransformed, standard flags, samples inside the clip).
 */
3165 #define IMAGE_FLAGS \
3166 (FAST_PATH_STANDARD_FLAGS | FAST_PATH_ID_TRANSFORM | \
3167 FAST_PATH_BITS_IMAGE | FAST_PATH_SAMPLES_COVER_CLIP_NEAREST)
3169 static const pixman_iter_info_t fast_iters[] =
3171 { PIXMAN_r5g6b5, IMAGE_FLAGS, ITER_NARROW | ITER_SRC,
3172 _pixman_iter_init_bits_stride, fast_fetch_r5g6b5, NULL },
3174 { PIXMAN_r5g6b5, FAST_PATH_STD_DEST_FLAGS,
3175 ITER_NARROW | ITER_DEST,
3176 _pixman_iter_init_bits_stride,
3177 fast_fetch_r5g6b5, fast_write_back_r5g6b5 },
3179 { PIXMAN_r5g6b5, FAST_PATH_STD_DEST_FLAGS,
3180 ITER_NARROW | ITER_DEST | ITER_IGNORE_RGB | ITER_IGNORE_ALPHA,
3181 _pixman_iter_init_bits_stride,
3182 fast_dest_fetch_noop, fast_write_back_r5g6b5 },
3185 (FAST_PATH_STANDARD_FLAGS |
3186 FAST_PATH_SCALE_TRANSFORM |
3187 FAST_PATH_BILINEAR_FILTER |
3188 FAST_PATH_SAMPLES_COVER_CLIP_BILINEAR),
3189 ITER_NARROW | ITER_SRC,
3190 fast_bilinear_cover_iter_init,
3194 #define FAST_BILINEAR_FLAGS \
3195 (FAST_PATH_NO_ALPHA_MAP | \
3196 FAST_PATH_NO_ACCESSORS | \
3197 FAST_PATH_HAS_TRANSFORM | \
3198 FAST_PATH_AFFINE_TRANSFORM | \
3199 FAST_PATH_X_UNIT_POSITIVE | \
3200 FAST_PATH_Y_UNIT_ZERO | \
3201 FAST_PATH_NONE_REPEAT | \
3202 FAST_PATH_BILINEAR_FILTER)
3205 FAST_BILINEAR_FLAGS,
3206 ITER_NARROW | ITER_SRC,
3207 NULL, bits_image_fetch_bilinear_no_repeat_8888, NULL
3211 FAST_BILINEAR_FLAGS,
3212 ITER_NARROW | ITER_SRC,
3213 NULL, bits_image_fetch_bilinear_no_repeat_8888, NULL
/*
 * An affine-transformed source qualifies for the general bilinear
 * fetchers when it is bilinear-filtered and has neither an alpha map
 * nor read/write accessors.  (Bitwise OR is order-independent; the
 * flag set is identical to the other GENERAL_*_FLAGS macros except
 * for the filter bit.)
 */
#define GENERAL_BILINEAR_FLAGS          \
    (FAST_PATH_BILINEAR_FILTER |        \
     FAST_PATH_AFFINE_TRANSFORM |       \
     FAST_PATH_HAS_TRANSFORM |          \
     FAST_PATH_NO_ACCESSORS |           \
     FAST_PATH_NO_ALPHA_MAP)
/*
 * An affine-transformed source qualifies for the general nearest
 * fetchers when it is nearest-filtered and has neither an alpha map
 * nor read/write accessors.
 */
#define GENERAL_NEAREST_FLAGS           \
    (FAST_PATH_NEAREST_FILTER |         \
     FAST_PATH_AFFINE_TRANSFORM |       \
     FAST_PATH_HAS_TRANSFORM |          \
     FAST_PATH_NO_ACCESSORS |           \
     FAST_PATH_NO_ALPHA_MAP)
/*
 * An affine-transformed source qualifies for the general separable
 * convolution fetchers when it uses the separable-convolution filter
 * and has neither an alpha map nor read/write accessors.
 */
#define GENERAL_SEPARABLE_CONVOLUTION_FLAGS             \
    (FAST_PATH_SEPARABLE_CONVOLUTION_FILTER |           \
     FAST_PATH_AFFINE_TRANSFORM |                       \
     FAST_PATH_HAS_TRANSFORM |                          \
     FAST_PATH_NO_ACCESSORS |                           \
     FAST_PATH_NO_ALPHA_MAP)
/*
 * Helper macros expanding to fast_iters[] entries for affine
 * transformed sources — one macro per filter kind (separable
 * convolution, bilinear, nearest), each naming the matching
 * bits_image_fetch_* routine for the given format/repeat pair, and
 * AFFINE_FAST_PATHS which emits all three entries at once.
 * NOTE(review): the trailing "}," continuation line of each of the
 * first three macros appears to be missing from this chunk; no
 * comments are placed between the definitions because every preceding
 * line ends in a backslash continuation.
 */
3237 #define SEPARABLE_CONVOLUTION_AFFINE_FAST_PATH(name, format, repeat) \
3238 { PIXMAN_ ## format, \
3239 GENERAL_SEPARABLE_CONVOLUTION_FLAGS | FAST_PATH_ ## repeat ## _REPEAT, \
3240 ITER_NARROW | ITER_SRC, \
3241 NULL, bits_image_fetch_separable_convolution_affine_ ## name, NULL \
3244 #define BILINEAR_AFFINE_FAST_PATH(name, format, repeat) \
3245 { PIXMAN_ ## format, \
3246 GENERAL_BILINEAR_FLAGS | FAST_PATH_ ## repeat ## _REPEAT, \
3247 ITER_NARROW | ITER_SRC, \
3248 NULL, bits_image_fetch_bilinear_affine_ ## name, NULL, \
3251 #define NEAREST_AFFINE_FAST_PATH(name, format, repeat) \
3252 { PIXMAN_ ## format, \
3253 GENERAL_NEAREST_FLAGS | FAST_PATH_ ## repeat ## _REPEAT, \
3254 ITER_NARROW | ITER_SRC, \
3255 NULL, bits_image_fetch_nearest_affine_ ## name, NULL \
3258 #define AFFINE_FAST_PATHS(name, format, repeat) \
3259 SEPARABLE_CONVOLUTION_AFFINE_FAST_PATH(name, format, repeat) \
3260 BILINEAR_AFFINE_FAST_PATH(name, format, repeat) \
3261 NEAREST_AFFINE_FAST_PATH(name, format, repeat)
/*
 * Instantiate affine fast-path iterator entries for every supported
 * format/repeat combination; each AFFINE_FAST_PATHS line expands to
 * three table entries (separable convolution, bilinear, nearest).
 * NOTE(review): entry order is preserved as written — the iterator
 * table is presumably matched first-hit-wins, so do not reorder
 * without confirming against the lookup code.
 */
3263 AFFINE_FAST_PATHS (pad_a8r8g8b8, a8r8g8b8, PAD)
3264 AFFINE_FAST_PATHS (none_a8r8g8b8, a8r8g8b8, NONE)
3265 AFFINE_FAST_PATHS (reflect_a8r8g8b8, a8r8g8b8, REFLECT)
3266 AFFINE_FAST_PATHS (normal_a8r8g8b8, a8r8g8b8, NORMAL)
3267 AFFINE_FAST_PATHS (pad_x8r8g8b8, x8r8g8b8, PAD)
3268 AFFINE_FAST_PATHS (none_x8r8g8b8, x8r8g8b8, NONE)
3269 AFFINE_FAST_PATHS (reflect_x8r8g8b8, x8r8g8b8, REFLECT)
3270 AFFINE_FAST_PATHS (normal_x8r8g8b8, x8r8g8b8, NORMAL)
3271 AFFINE_FAST_PATHS (pad_a8, a8, PAD)
3272 AFFINE_FAST_PATHS (none_a8, a8, NONE)
3273 AFFINE_FAST_PATHS (reflect_a8, a8, REFLECT)
3274 AFFINE_FAST_PATHS (normal_a8, a8, NORMAL)
3275 AFFINE_FAST_PATHS (pad_r5g6b5, r5g6b5, PAD)
3276 AFFINE_FAST_PATHS (none_r5g6b5, r5g6b5, NONE)
3277 AFFINE_FAST_PATHS (reflect_r5g6b5, r5g6b5, REFLECT)
3278 AFFINE_FAST_PATHS (normal_r5g6b5, r5g6b5, NORMAL)
/*
 * Create the C "fast path" implementation level, chained to
 * `fallback`, and install the optimized fill routine and the
 * iterator table defined above.
 * NOTE(review): this definition is truncated in this chunk — the
 * function's opening brace, its return statement and closing brace
 * are not visible here.
 */
3283 pixman_implementation_t *
3284 _pixman_implementation_create_fast_path (pixman_implementation_t *fallback)
3286 pixman_implementation_t *imp = _pixman_implementation_create (fallback, c_fast_paths);
3288 imp->fill = fast_path_fill;
3289 imp->iter_info = fast_iters;