author     Koen Kooi <koen@openembedded.org>  2009-09-03 21:33:55 +0200
committer  Koen Kooi <koen@openembedded.org>  2009-09-03 21:33:55 +0200
commit     5366c510565553f17b28ed936b34dfa4d3026b21 (patch)
tree       6e43d1f714e3fd9bea76e97588cc0cd0278970d6
parent     19e6e8e30aaf2a791a657d752649865754963ce6 (diff)
pixman git: add some more NEON and fastpath patches
-rw-r--r--  recipes/xorg-lib/pixman/nearest-neighbour.patch | 1040
-rw-r--r--  recipes/xorg-lib/pixman/over-8888-0565.patch    |  296
-rw-r--r--  recipes/xorg-lib/pixman/pixman-28986.patch      |   32
-rw-r--r--  recipes/xorg-lib/pixman/remove-broken.patch     |  826
-rw-r--r--  recipes/xorg-lib/pixman_git.bb                  |   10
5 files changed, 2201 insertions, 3 deletions
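Editor's note: the nearest-neighbour fastpaths added below all step a 16.16 fixed-point source coordinate (`vx`, advanced by `unit_x`) across each destination row and take the integer part as the source pixel index. A minimal standalone sketch of that stepping follows; the names are illustrative, not actual pixman API:

    /* Sketch of the 16.16 fixed-point nearest-neighbour stepping used by
     * the fastpaths in this patch; names are illustrative, not pixman API. */
    #include <stdint.h>

    static void
    scale_row_nearest (const uint32_t *src, int src_width,
                       uint32_t *dst, int dst_width,
                       int32_t vx, int32_t unit_x)
    {
        while (dst_width-- > 0)
        {
            int x = vx >> 16;       /* integer part picks the source pixel */

            /* Simple clamp for the sketch only; the real fastpaths instead
             * verify up front that the clip boundary is never crossed (see
             * fbTransformVerifyHorizontalClipping below), or wrap vx for
             * PIXMAN_REPEAT_NORMAL. */
            if (x < 0)
                x = 0;
            else if (x >= src_width)
                x = src_width - 1;

            *dst++ = src[x];
            vx += unit_x;           /* fractional step per destination pixel */
        }
    }

The per-pixel clamp is what the patch avoids paying for: by checking the whole span once, its inner loops can run with no bounds tests at all.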
diff --git a/recipes/xorg-lib/pixman/nearest-neighbour.patch b/recipes/xorg-lib/pixman/nearest-neighbour.patch new file mode 100644 index 0000000000..29b140faf9 --- /dev/null +++ b/recipes/xorg-lib/pixman/nearest-neighbour.patch @@ -0,0 +1,1040 @@ +From: Siarhei Siamashka <siarhei.siamashka@nokia.com> +Date: Fri, 17 Jul 2009 10:22:23 +0000 (+0300) +Subject: Fastpath for nearest neighbour scaled compositing operations. +X-Git-Url: http://siarhei.siamashka.name/gitweb/?p=pixman.git;a=commitdiff_plain;h=247531c6978725a88fd3706129b9d3e339026f54 + +Fastpath for nearest neighbour scaled compositing operations. + +OVER 8888x8888, OVER 8888x0565, SRC 8888x8888, SRC 8888x0565 +and SRC 0565x0565 cases are supported. +--- + +diff --git a/pixman/pixman-fast-path.c b/pixman/pixman-fast-path.c +index 7f80578..7f3a6ad 100644 +--- a/pixman/pixman-fast-path.c ++++ b/pixman/pixman-fast-path.c +@@ -1261,6 +1261,993 @@ fast_composite_src_scale_nearest (pixman_implementation_t *imp, + } + } + ++/* ++ * Functions, which implement the core inner loops for the nearest neighbour ++ * scaled fastpath compositing operations. The do not need to do clipping ++ * checks, also the loops are unrolled to process two pixels per iteration ++ * for better performance on most CPU architectures (superscalar processors ++ * can issue several operations simultaneously, other processors can hide ++ * instructions latencies by pipelining operations). Unrolling more ++ * does not make much sense because the compiler will start running out ++ * of spare registers soon. ++ */ ++ ++#undef READ ++#undef WRITE ++#define READ(img,x) (*(x)) ++#define WRITE(img,ptr,v) ((*(ptr)) = (v)) ++ ++#define UN8x4_MUL_UN8_ADD_UN8x4_store_r5g6b5(x, a, y) do { \ ++ UN8x4_MUL_UN8_ADD_UN8x4(x, a, y); \ ++ x = CONVERT_8888_TO_0565(x); \ ++ } while (0) ++ ++static void fbCompositeTransformNearestNonrotatedAffineTrivialclipOver_8888x0565 ( ++ pixman_image_t *pSrc, pixman_image_t *pDst, int xSrc, int ySrc, int xDst, int yDst, ++ int width, int height, int32_t vx, int32_t vy, int32_t unit_x, int32_t unit_y) ++{ ++ uint16_t *dstLine; ++ uint32_t *srcFirstLine; ++ uint32_t d; ++ uint32_t s1, s2; ++ uint8_t a1, a2; ++ int w; ++ int x1, x2, y; ++ int32_t orig_vx = vx; ++ ++ uint32_t *src; ++ uint16_t *dst; ++ int srcStride, dstStride; ++ PIXMAN_IMAGE_GET_LINE (pDst, xDst, yDst, uint16_t, dstStride, dstLine, 1); ++ /* pass in 0 instead of xSrc and ySrc because xSrc and ySrc need to be ++ * transformed from destination space to source space */ ++ PIXMAN_IMAGE_GET_LINE (pSrc, 0, 0, uint32_t, srcStride, srcFirstLine, 1); ++ ++ while (--height >= 0) ++ { ++ dst = dstLine; ++ dstLine += dstStride; ++ ++ y = vy >> 16; ++ vy += unit_y; ++ ++ if ((y < 0) || (y >= pSrc->bits.height)) { ++ continue; ++ } ++ ++ src = srcFirstLine + srcStride * y; ++ ++ w = width; ++ vx = orig_vx; ++ while ((w -= 2) >= 0) ++ { ++ x1 = vx >> 16; ++ vx += unit_x; ++ s1 = READ(pSrc, src + x1); ++ ++ x2 = vx >> 16; ++ vx += unit_x; ++ s2 = READ(pSrc, src + x2); ++ ++ a1 = s1 >> 24; ++ a2 = s2 >> 24; ++ ++ if (a1 == 0xff) ++ WRITE(pDst, dst, CONVERT_8888_TO_0565(s1)); ++ else if (s1) { ++ d = CONVERT_0565_TO_0888(READ(pDst, dst)); ++ a1 ^= 0xff; ++ UN8x4_MUL_UN8_ADD_UN8x4_store_r5g6b5(d, a1, s1); ++ WRITE(pDst, dst, d); ++ } ++ dst++; ++ ++ if (a2 == 0xff) ++ WRITE(pDst, dst, CONVERT_8888_TO_0565(s2)); ++ else if (s2) { ++ d = CONVERT_0565_TO_0888(READ(pDst, dst)); ++ a2 ^= 0xff; ++ UN8x4_MUL_UN8_ADD_UN8x4_store_r5g6b5(d, a2, s2); ++ WRITE(pDst, dst, d); ++ } ++ dst++; ++ } ++ if (w & 1) { ++ x1 = 
vx >> 16; ++ vx += unit_x; ++ s1 = READ(pSrc, src + x1); ++ ++ a1 = s1 >> 24; ++ if (a1 == 0xff) ++ WRITE(pDst, dst, CONVERT_8888_TO_0565(s1)); ++ else if (s1) { ++ d = CONVERT_0565_TO_0888(READ(pDst, dst)); ++ a1 ^= 0xff; ++ UN8x4_MUL_UN8_ADD_UN8x4_store_r5g6b5(d, a1, s1); ++ WRITE(pDst, dst, d); ++ } ++ dst++; ++ } ++ } ++} ++ ++static void fbCompositeTransformNearestNonrotatedAffineTrivialclipRepeatOver_8888x0565 ( ++ pixman_image_t *pSrc, pixman_image_t *pDst, int xSrc, int ySrc, int xDst, int yDst, ++ int width, int height, int32_t vx, int32_t vy, int32_t unit_x, int32_t unit_y) ++{ ++ uint16_t *dstLine; ++ uint32_t *srcFirstLine; ++ uint32_t d; ++ uint32_t s1, s2; ++ uint8_t a1, a2; ++ int w; ++ int x1, x2, y; ++ int32_t orig_vx = vx; ++ int32_t max_vx, max_vy; ++ ++ uint32_t *src; ++ uint16_t *dst; ++ int srcStride, dstStride; ++ PIXMAN_IMAGE_GET_LINE (pDst, xDst, yDst, uint16_t, dstStride, dstLine, 1); ++ /* pass in 0 instead of xSrc and ySrc because xSrc and ySrc need to be ++ * transformed from destination space to source space */ ++ PIXMAN_IMAGE_GET_LINE (pSrc, 0, 0, uint32_t, srcStride, srcFirstLine, 1); ++ ++ max_vx = pSrc->bits.width << 16; ++ max_vy = pSrc->bits.height << 16; ++ ++ while (orig_vx < 0) orig_vx += max_vx; ++ while (vy < 0) vy += max_vy; ++ while (orig_vx >= max_vx) orig_vx -= max_vx; ++ while (vy >= max_vy) vy -= max_vy; ++ ++ while (--height >= 0) ++ { ++ dst = dstLine; ++ dstLine += dstStride; ++ ++ y = vy >> 16; ++ vy += unit_y; ++ while (vy >= max_vy) vy -= max_vy; ++ ++ src = srcFirstLine + srcStride * y; ++ ++ w = width; ++ vx = orig_vx; ++ while ((w -= 2) >= 0) ++ { ++ x1 = vx >> 16; ++ vx += unit_x; ++ while (vx >= max_vx) vx -= max_vx; ++ s1 = READ(pSrc, src + x1); ++ ++ x2 = vx >> 16; ++ vx += unit_x; ++ while (vx >= max_vx) vx -= max_vx; ++ s2 = READ(pSrc, src + x2); ++ ++ a1 = s1 >> 24; ++ a2 = s2 >> 24; ++ ++ if (a1 == 0xff) ++ WRITE(pDst, dst, CONVERT_8888_TO_0565(s1)); ++ else if (s1) { ++ d = CONVERT_0565_TO_0888(READ(pDst, dst)); ++ a1 ^= 0xff; ++ UN8x4_MUL_UN8_ADD_UN8x4_store_r5g6b5(d, a1, s1); ++ WRITE(pDst, dst, d); ++ } ++ dst++; ++ ++ if (a2 == 0xff) ++ WRITE(pDst, dst, CONVERT_8888_TO_0565(s2)); ++ else if (s2) { ++ d = CONVERT_0565_TO_0888(READ(pDst, dst)); ++ a2 ^= 0xff; ++ UN8x4_MUL_UN8_ADD_UN8x4_store_r5g6b5(d, a2, s2); ++ WRITE(pDst, dst, d); ++ } ++ dst++; ++ } ++ if (w & 1) { ++ x1 = vx >> 16; ++ vx += unit_x; ++ while (vx >= max_vx) vx -= max_vx; ++ s1 = READ(pSrc, src + x1); ++ ++ a1 = s1 >> 24; ++ if (a1 == 0xff) ++ WRITE(pDst, dst, CONVERT_8888_TO_0565(s1)); ++ else if (s1) { ++ d = CONVERT_0565_TO_0888(READ(pDst, dst)); ++ a1 ^= 0xff; ++ UN8x4_MUL_UN8_ADD_UN8x4_store_r5g6b5(d, a1, s1); ++ WRITE(pDst, dst, d); ++ } ++ dst++; ++ } ++ } ++} ++ ++static void fbCompositeTransformNearestNonrotatedAffineTrivialclipOver_8888x8888 ( ++ pixman_image_t *pSrc, pixman_image_t *pDst, int xSrc, int ySrc, int xDst, int yDst, ++ int width, int height, int32_t vx, int32_t vy, int32_t unit_x, int32_t unit_y) ++{ ++ uint32_t *dstLine; ++ uint32_t *srcFirstLine; ++ uint32_t d; ++ uint32_t s1, s2; ++ uint8_t a1, a2; ++ int w; ++ int x1, x2, y; ++ int32_t orig_vx = vx; ++ ++ uint32_t *src, *dst; ++ int srcStride, dstStride; ++ PIXMAN_IMAGE_GET_LINE (pDst, xDst, yDst, uint32_t, dstStride, dstLine, 1); ++ /* pass in 0 instead of xSrc and ySrc because xSrc and ySrc need to be ++ * transformed from destination space to source space */ ++ PIXMAN_IMAGE_GET_LINE (pSrc, 0, 0, uint32_t, srcStride, srcFirstLine, 1); ++ ++ while (--height >= 0) ++ { ++ dst = 
dstLine; ++ dstLine += dstStride; ++ ++ y = vy >> 16; ++ vy += unit_y; ++ ++ if ((y < 0) || (y >= pSrc->bits.height)) { ++ continue; ++ } ++ ++ src = srcFirstLine + srcStride * y; ++ ++ w = width; ++ vx = orig_vx; ++ while ((w -= 2) >= 0) ++ { ++ x1 = vx >> 16; ++ vx += unit_x; ++ s1 = READ(pSrc, src + x1); ++ ++ x2 = vx >> 16; ++ vx += unit_x; ++ s2 = READ(pSrc, src + x2); ++ ++ a1 = s1 >> 24; ++ a2 = s2 >> 24; ++ ++ if (a1 == 0xff) ++ WRITE(pDst, dst, s1); ++ else if (s1) { ++ d = READ(pDst, dst); ++ a1 ^= 0xff; ++ UN8x4_MUL_UN8_ADD_UN8x4(d, a1, s1); ++ WRITE(pDst, dst, d); ++ } ++ dst++; ++ ++ if (a2 == 0xff) ++ WRITE(pDst, dst, s2); ++ else if (s2) { ++ d = READ(pDst, dst); ++ a2 ^= 0xff; ++ UN8x4_MUL_UN8_ADD_UN8x4(d, a2, s2); ++ WRITE(pDst, dst, d); ++ } ++ dst++; ++ } ++ if (w & 1) { ++ x1 = vx >> 16; ++ vx += unit_x; ++ s1 = READ(pSrc, src + x1); ++ ++ a1 = s1 >> 24; ++ if (a1 == 0xff) ++ WRITE(pDst, dst, s1); ++ else if (s1) { ++ d = READ(pDst, dst); ++ a1 ^= 0xff; ++ UN8x4_MUL_UN8_ADD_UN8x4(d, a1, s1); ++ WRITE(pDst, dst, d); ++ } ++ dst++; ++ } ++ } ++} ++ ++static void fbCompositeTransformNearestNonrotatedAffineTrivialclipRepeatOver_8888x8888 ( ++ pixman_image_t *pSrc, pixman_image_t *pDst, int xSrc, int ySrc, int xDst, int yDst, ++ int width, int height, int32_t vx, int32_t vy, int32_t unit_x, int32_t unit_y) ++{ ++ uint32_t *dstLine; ++ uint32_t *srcFirstLine; ++ uint32_t d; ++ uint32_t s1, s2; ++ uint8_t a1, a2; ++ int w; ++ int x1, x2, y; ++ int32_t orig_vx = vx; ++ int32_t max_vx, max_vy; ++ ++ uint32_t *src, *dst; ++ int srcStride, dstStride; ++ PIXMAN_IMAGE_GET_LINE (pDst, xDst, yDst, uint32_t, dstStride, dstLine, 1); ++ /* pass in 0 instead of xSrc and ySrc because xSrc and ySrc need to be ++ * transformed from destination space to source space */ ++ PIXMAN_IMAGE_GET_LINE (pSrc, 0, 0, uint32_t, srcStride, srcFirstLine, 1); ++ ++ max_vx = pSrc->bits.width << 16; ++ max_vy = pSrc->bits.height << 16; ++ ++ while (orig_vx < 0) orig_vx += max_vx; ++ while (vy < 0) vy += max_vy; ++ while (orig_vx >= max_vx) orig_vx -= max_vx; ++ while (vy >= max_vy) vy -= max_vy; ++ ++ while (--height >= 0) ++ { ++ dst = dstLine; ++ dstLine += dstStride; ++ ++ y = vy >> 16; ++ vy += unit_y; ++ while (vy >= max_vy) vy -= max_vy; ++ ++ src = srcFirstLine + srcStride * y; ++ ++ w = width; ++ vx = orig_vx; ++ while ((w -= 2) >= 0) ++ { ++ x1 = vx >> 16; ++ vx += unit_x; ++ while (vx >= max_vx) vx -= max_vx; ++ s1 = READ(pSrc, src + x1); ++ ++ x2 = vx >> 16; ++ vx += unit_x; ++ while (vx >= max_vx) vx -= max_vx; ++ s2 = READ(pSrc, src + x2); ++ ++ a1 = s1 >> 24; ++ a2 = s2 >> 24; ++ ++ if (a1 == 0xff) ++ WRITE(pDst, dst, s1); ++ else if (s1) { ++ d = READ(pDst, dst); ++ a1 ^= 0xff; ++ UN8x4_MUL_UN8_ADD_UN8x4(d, a1, s1); ++ WRITE(pDst, dst, d); ++ } ++ dst++; ++ ++ if (a2 == 0xff) ++ WRITE(pDst, dst, s2); ++ else if (s2) { ++ d = READ(pDst, dst); ++ a2 ^= 0xff; ++ UN8x4_MUL_UN8_ADD_UN8x4(d, a2, s2); ++ WRITE(pDst, dst, d); ++ } ++ dst++; ++ } ++ if (w & 1) { ++ x1 = vx >> 16; ++ vx += unit_x; ++ while (vx >= max_vx) vx -= max_vx; ++ s1 = READ(pSrc, src + x1); ++ ++ a1 = s1 >> 24; ++ if (a1 == 0xff) ++ WRITE(pDst, dst, s1); ++ else if (s1) { ++ d = READ(pDst, dst); ++ a1 ^= 0xff; ++ UN8x4_MUL_UN8_ADD_UN8x4(d, a1, s1); ++ WRITE(pDst, dst, d); ++ } ++ dst++; ++ } ++ } ++} ++ ++static void fbCompositeTransformNearestNonrotatedAffineTrivialclipSrc_8888x8888 ( ++ pixman_image_t *pSrc, pixman_image_t *pDst, int xSrc, int ySrc, int xDst, int yDst, ++ int width, int height, int32_t vx, int32_t vy, int32_t 
unit_x, int32_t unit_y) ++{ ++ uint32_t *dstLine; ++ uint32_t *srcFirstLine; ++ uint32_t s1, s2; ++ int w; ++ int x1, x2, y; ++ int32_t orig_vx = vx; ++ ++ uint32_t *src, *dst; ++ int srcStride, dstStride; ++ PIXMAN_IMAGE_GET_LINE (pDst, xDst, yDst, uint32_t, dstStride, dstLine, 1); ++ /* pass in 0 instead of xSrc and ySrc because xSrc and ySrc need to be ++ * transformed from destination space to source space */ ++ PIXMAN_IMAGE_GET_LINE (pSrc, 0, 0, uint32_t, srcStride, srcFirstLine, 1); ++ ++ while (--height >= 0) ++ { ++ dst = dstLine; ++ dstLine += dstStride; ++ ++ y = vy >> 16; ++ vy += unit_y; ++ ++ if ((y < 0) || (y >= pSrc->bits.height)) { ++ memset(dst, 0, width * sizeof(*dst)); ++ continue; ++ } ++ ++ src = srcFirstLine + srcStride * y; ++ ++ w = width; ++ vx = orig_vx; ++ while ((w -= 2) >= 0) ++ { ++ x1 = vx >> 16; ++ vx += unit_x; ++ s1 = READ(pSrc, src + x1); ++ ++ x2 = vx >> 16; ++ vx += unit_x; ++ s2 = READ(pSrc, src + x2); ++ ++ WRITE(pDst, dst, s1); ++ dst++; ++ WRITE(pDst, dst, s2); ++ dst++; ++ } ++ if (w & 1) { ++ x1 = vx >> 16; ++ vx += unit_x; ++ s1 = READ(pSrc, src + x1); ++ WRITE(pDst, dst, s1); ++ dst++; ++ } ++ } ++} ++ ++static void fbCompositeTransformNearestNonrotatedAffineTrivialclipRepeatSrc_8888x8888 ( ++ pixman_image_t *pSrc, pixman_image_t *pDst, int xSrc, int ySrc, int xDst, int yDst, ++ int width, int height, int32_t vx, int32_t vy, int32_t unit_x, int32_t unit_y) ++{ ++ uint32_t *dstLine; ++ uint32_t *srcFirstLine; ++ uint32_t s1, s2; ++ int w; ++ int x1, x2, y; ++ int32_t orig_vx = vx; ++ int32_t max_vx, max_vy; ++ ++ uint32_t *src, *dst; ++ int srcStride, dstStride; ++ PIXMAN_IMAGE_GET_LINE (pDst, xDst, yDst, uint32_t, dstStride, dstLine, 1); ++ /* pass in 0 instead of xSrc and ySrc because xSrc and ySrc need to be ++ * transformed from destination space to source space */ ++ PIXMAN_IMAGE_GET_LINE (pSrc, 0, 0, uint32_t, srcStride, srcFirstLine, 1); ++ ++ max_vx = pSrc->bits.width << 16; ++ max_vy = pSrc->bits.height << 16; ++ ++ while (orig_vx < 0) orig_vx += max_vx; ++ while (vy < 0) vy += max_vy; ++ while (orig_vx >= max_vx) orig_vx -= max_vx; ++ while (vy >= max_vy) vy -= max_vy; ++ ++ while (--height >= 0) ++ { ++ dst = dstLine; ++ dstLine += dstStride; ++ ++ y = vy >> 16; ++ vy += unit_y; ++ while (vy >= max_vy) vy -= max_vy; ++ ++ src = srcFirstLine + srcStride * y; ++ ++ w = width; ++ vx = orig_vx; ++ while ((w -= 2) >= 0) ++ { ++ x1 = vx >> 16; ++ vx += unit_x; ++ while (vx >= max_vx) vx -= max_vx; ++ s1 = READ(pSrc, src + x1); ++ ++ x2 = vx >> 16; ++ vx += unit_x; ++ while (vx >= max_vx) vx -= max_vx; ++ s2 = READ(pSrc, src + x2); ++ ++ WRITE(pDst, dst, s1); ++ dst++; ++ WRITE(pDst, dst, s2); ++ dst++; ++ } ++ if (w & 1) { ++ x1 = vx >> 16; ++ vx += unit_x; ++ while (vx >= max_vx) vx -= max_vx; ++ s1 = READ(pSrc, src + x1); ++ ++ WRITE(pDst, dst, s1); ++ dst++; ++ } ++ } ++} ++ ++static void fbCompositeTransformNearestNonrotatedAffineTrivialclipSrc_0565x0565 ( ++ pixman_image_t *pSrc, pixman_image_t *pDst, int xSrc, int ySrc, int xDst, int yDst, ++ int width, int height, int32_t vx, int32_t vy, int32_t unit_x, int32_t unit_y) ++{ ++ uint16_t *dstLine; ++ uint16_t *srcFirstLine; ++ uint16_t s1, s2; ++ int w; ++ int x1, x2, y; ++ int32_t orig_vx = vx; ++ ++ uint16_t *src, *dst; ++ int srcStride, dstStride; ++ PIXMAN_IMAGE_GET_LINE (pDst, xDst, yDst, uint16_t, dstStride, dstLine, 1); ++ /* pass in 0 instead of xSrc and ySrc because xSrc and ySrc need to be ++ * transformed from destination space to source space */ ++ PIXMAN_IMAGE_GET_LINE 
(pSrc, 0, 0, uint16_t, srcStride, srcFirstLine, 1); ++ ++ while (--height >= 0) ++ { ++ dst = dstLine; ++ dstLine += dstStride; ++ ++ y = vy >> 16; ++ vy += unit_y; ++ ++ if ((y < 0) || (y >= pSrc->bits.height)) { ++ memset(dst, 0, width * sizeof(*dst)); ++ continue; ++ } ++ ++ src = srcFirstLine + srcStride * y; ++ ++ w = width; ++ vx = orig_vx; ++ while ((w -= 2) >= 0) ++ { ++ x1 = vx >> 16; ++ vx += unit_x; ++ s1 = READ(pSrc, src + x1); ++ ++ x2 = vx >> 16; ++ vx += unit_x; ++ s2 = READ(pSrc, src + x2); ++ ++ WRITE(pDst, dst, s1); ++ dst++; ++ WRITE(pDst, dst, s2); ++ dst++; ++ } ++ if (w & 1) { ++ x1 = vx >> 16; ++ vx += unit_x; ++ s1 = READ(pSrc, src + x1); ++ WRITE(pDst, dst, s1); ++ dst++; ++ } ++ } ++} ++ ++static void fbCompositeTransformNearestNonrotatedAffineTrivialclipRepeatSrc_0565x0565 ( ++ pixman_image_t *pSrc, pixman_image_t *pDst, int xSrc, int ySrc, int xDst, int yDst, ++ int width, int height, int32_t vx, int32_t vy, int32_t unit_x, int32_t unit_y) ++{ ++ uint16_t *dstLine; ++ uint16_t *srcFirstLine; ++ uint16_t s1, s2; ++ int w; ++ int x1, x2, y; ++ int32_t orig_vx = vx; ++ int32_t max_vx, max_vy; ++ ++ uint16_t *src, *dst; ++ int srcStride, dstStride; ++ PIXMAN_IMAGE_GET_LINE (pDst, xDst, yDst, uint16_t, dstStride, dstLine, 1); ++ /* pass in 0 instead of xSrc and ySrc because xSrc and ySrc need to be ++ * transformed from destination space to source space */ ++ PIXMAN_IMAGE_GET_LINE (pSrc, 0, 0, uint16_t, srcStride, srcFirstLine, 1); ++ ++ max_vx = pSrc->bits.width << 16; ++ max_vy = pSrc->bits.height << 16; ++ ++ while (orig_vx < 0) orig_vx += max_vx; ++ while (vy < 0) vy += max_vy; ++ while (orig_vx >= max_vx) orig_vx -= max_vx; ++ while (vy >= max_vy) vy -= max_vy; ++ ++ while (--height >= 0) ++ { ++ dst = dstLine; ++ dstLine += dstStride; ++ ++ y = vy >> 16; ++ vy += unit_y; ++ while (vy >= max_vy) vy -= max_vy; ++ ++ src = srcFirstLine + srcStride * y; ++ ++ w = width; ++ vx = orig_vx; ++ while ((w -= 2) >= 0) ++ { ++ x1 = vx >> 16; ++ vx += unit_x; ++ while (vx >= max_vx) vx -= max_vx; ++ s1 = READ(pSrc, src + x1); ++ ++ x2 = vx >> 16; ++ vx += unit_x; ++ while (vx >= max_vx) vx -= max_vx; ++ s2 = READ(pSrc, src + x2); ++ ++ WRITE(pDst, dst, s1); ++ dst++; ++ WRITE(pDst, dst, s2); ++ dst++; ++ } ++ if (w & 1) { ++ x1 = vx >> 16; ++ vx += unit_x; ++ while (vx >= max_vx) vx -= max_vx; ++ s1 = READ(pSrc, src + x1); ++ ++ WRITE(pDst, dst, s1); ++ dst++; ++ } ++ } ++} ++ ++static void fbCompositeTransformNearestNonrotatedAffineTrivialclipSrc_8888x0565 ( ++ pixman_image_t *pSrc, pixman_image_t *pDst, int xSrc, int ySrc, int xDst, int yDst, ++ int width, int height, int32_t vx, int32_t vy, int32_t unit_x, int32_t unit_y) ++{ ++ uint16_t *dstLine; ++ uint32_t *srcFirstLine; ++ uint32_t s1, s2; ++ int w; ++ int x1, x2, y; ++ int32_t orig_vx = vx; ++ ++ uint32_t *src; ++ uint16_t *dst; ++ int srcStride, dstStride; ++ PIXMAN_IMAGE_GET_LINE (pDst, xDst, yDst, uint16_t, dstStride, dstLine, 1); ++ /* pass in 0 instead of xSrc and ySrc because xSrc and ySrc need to be ++ * transformed from destination space to source space */ ++ PIXMAN_IMAGE_GET_LINE (pSrc, 0, 0, uint32_t, srcStride, srcFirstLine, 1); ++ ++ while (--height >= 0) ++ { ++ dst = dstLine; ++ dstLine += dstStride; ++ ++ y = vy >> 16; ++ vy += unit_y; ++ ++ if ((y < 0) || (y >= pSrc->bits.height)) { ++ memset(dst, 0, width * sizeof(*dst)); ++ continue; ++ } ++ ++ src = srcFirstLine + srcStride * y; ++ ++ w = width; ++ vx = orig_vx; ++ while ((w -= 2) >= 0) ++ { ++ x1 = vx >> 16; ++ vx += unit_x; ++ s1 = READ(pSrc, 
src + x1); ++ ++ x2 = vx >> 16; ++ vx += unit_x; ++ s2 = READ(pSrc, src + x2); ++ ++ WRITE(pDst, dst, CONVERT_8888_TO_0565(s1)); ++ dst++; ++ WRITE(pDst, dst, CONVERT_8888_TO_0565(s2)); ++ dst++; ++ } ++ if (w & 1) { ++ x1 = vx >> 16; ++ vx += unit_x; ++ s1 = READ(pSrc, src + x1); ++ WRITE(pDst, dst, CONVERT_8888_TO_0565(s1)); ++ dst++; ++ } ++ } ++} ++ ++static void fbCompositeTransformNearestNonrotatedAffineTrivialclipRepeatSrc_8888x0565 ( ++ pixman_image_t *pSrc, pixman_image_t *pDst, int xSrc, int ySrc, int xDst, int yDst, ++ int width, int height, int32_t vx, int32_t vy, int32_t unit_x, int32_t unit_y) ++{ ++ uint16_t *dstLine; ++ uint32_t *srcFirstLine; ++ uint32_t s1, s2; ++ int w; ++ int x1, x2, y; ++ int32_t orig_vx = vx; ++ int32_t max_vx, max_vy; ++ ++ uint32_t *src; ++ uint16_t *dst; ++ int srcStride, dstStride; ++ PIXMAN_IMAGE_GET_LINE (pDst, xDst, yDst, uint16_t, dstStride, dstLine, 1); ++ /* pass in 0 instead of xSrc and ySrc because xSrc and ySrc need to be ++ * transformed from destination space to source space */ ++ PIXMAN_IMAGE_GET_LINE (pSrc, 0, 0, uint32_t, srcStride, srcFirstLine, 1); ++ ++ max_vx = pSrc->bits.width << 16; ++ max_vy = pSrc->bits.height << 16; ++ ++ while (orig_vx < 0) orig_vx += max_vx; ++ while (vy < 0) vy += max_vy; ++ while (orig_vx >= max_vx) orig_vx -= max_vx; ++ while (vy >= max_vy) vy -= max_vy; ++ ++ while (--height >= 0) ++ { ++ dst = dstLine; ++ dstLine += dstStride; ++ ++ y = vy >> 16; ++ vy += unit_y; ++ while (vy >= max_vy) vy -= max_vy; ++ ++ src = srcFirstLine + srcStride * y; ++ ++ w = width; ++ vx = orig_vx; ++ while ((w -= 2) >= 0) ++ { ++ x1 = vx >> 16; ++ vx += unit_x; ++ while (vx >= max_vx) vx -= max_vx; ++ s1 = READ(pSrc, src + x1); ++ ++ x2 = vx >> 16; ++ vx += unit_x; ++ while (vx >= max_vx) vx -= max_vx; ++ s2 = READ(pSrc, src + x2); ++ ++ WRITE(pDst, dst, CONVERT_8888_TO_0565(s1)); ++ dst++; ++ WRITE(pDst, dst, CONVERT_8888_TO_0565(s2)); ++ dst++; ++ } ++ if (w & 1) { ++ x1 = vx >> 16; ++ vx += unit_x; ++ while (vx >= max_vx) vx -= max_vx; ++ s1 = READ(pSrc, src + x1); ++ ++ WRITE(pDst, dst, CONVERT_8888_TO_0565(s1)); ++ dst++; ++ } ++ } ++} ++ ++/* ++ * Check if the clipping boundary is crossed on horizontal scaling ++ */ ++static inline pixman_bool_t ++fbTransformVerifyHorizontalClipping(pixman_image_t *pict, int width, int32_t vx, int32_t unit_x) ++{ ++ while (--width >= 0) { ++ int x = vx >> 16; ++ if ((x < 0) || (x >= pict->bits.width)) return 1; ++ vx += unit_x; ++ } ++ return 0; ++} ++ ++/* ++ * Check if the clipping boundary is crossed on vertical scaling ++ */ ++static inline pixman_bool_t ++fbTransformVerifyVerticalClipping(pixman_image_t *pict, int height, int32_t vy, int32_t unit_y) ++{ ++ while (--height >= 0) { ++ int y = vy >> 16; ++ if ((y < 0) || (y >= pict->bits.height)) return 1; ++ vy += unit_y; ++ } ++ return 0; ++} ++ ++/* ++ * Easy case of transform without rotation or complex clipping ++ * Returns 1 in the case if it was able to handle this operation and 0 otherwise ++ */ ++static pixman_bool_t ++fbCompositeTransformNonrotatedAffineTrivialclip ( ++ pixman_op_t op, ++ pixman_image_t *pSrc, ++ pixman_image_t *pMask, ++ pixman_image_t *pDst, ++ int16_t xSrc, ++ int16_t ySrc, ++ int16_t xMask, ++ int16_t yMask, ++ int16_t xDst, ++ int16_t yDst, ++ uint16_t width, ++ uint16_t height) ++{ ++ pixman_vector_t v, unit; ++ int skipdst_x = 0, skipdst_y = 0; ++ ++ /* Handle destination clipping */ ++ if (xDst < pDst->common.clip_region.extents.x1) { ++ skipdst_x = pDst->common.clip_region.extents.x1 - xDst; ++ if 
(skipdst_x >= (int)width) ++ return 1; ++ xDst = pDst->common.clip_region.extents.x1; ++ width -= skipdst_x; ++ } ++ ++ if (yDst < pDst->common.clip_region.extents.y1) { ++ skipdst_y = pDst->common.clip_region.extents.y1 - yDst; ++ if (skipdst_y >= (int)height) ++ return 1; ++ yDst = pDst->common.clip_region.extents.y1; ++ height -= skipdst_y; ++ } ++ ++ if (xDst >= pDst->common.clip_region.extents.x2 || ++ yDst >= pDst->common.clip_region.extents.y2) ++ { ++ return 1; ++ } ++ ++ if (xDst + width > pDst->common.clip_region.extents.x2) ++ width = pDst->common.clip_region.extents.x2 - xDst; ++ if (yDst + height > pDst->common.clip_region.extents.y2) ++ height = pDst->common.clip_region.extents.y2 - yDst; ++ ++ /* reference point is the center of the pixel */ ++ v.vector[0] = pixman_int_to_fixed(xSrc) + pixman_fixed_1 / 2; ++ v.vector[1] = pixman_int_to_fixed(ySrc) + pixman_fixed_1 / 2; ++ v.vector[2] = pixman_fixed_1; ++ ++ if (!pixman_transform_point_3d (pSrc->common.transform, &v)) ++ return 0; ++ ++ /* Round down to closest integer, ensuring that 0.5 rounds to 0, not 1 */ ++ v.vector[0] -= pixman_fixed_e; ++ v.vector[1] -= pixman_fixed_e; ++ ++ unit.vector[0] = pSrc->common.transform->matrix[0][0]; ++ unit.vector[1] = pSrc->common.transform->matrix[1][1]; ++ ++ v.vector[0] += unit.vector[0] * skipdst_x; ++ v.vector[1] += unit.vector[1] * skipdst_y; ++ ++ /* Check for possible fixed point arithmetics problems/overflows */ ++ if (unit.vector[0] <= 0 || unit.vector[1] <= 0) ++ return 0; ++ if (width == 0 || height == 0) ++ return 0; ++ if ((uint32_t)width + (unit.vector[0] >> 16) >= 0x7FFF) ++ return 0; ++ if ((uint32_t)height + (unit.vector[1] >> 16) >= 0x7FFF) ++ return 0; ++ ++ /* Horizontal source clipping is only supported for NORMAL repeat */ ++ if (pSrc->common.repeat != PIXMAN_REPEAT_NORMAL ++ && fbTransformVerifyHorizontalClipping(pSrc, width, v.vector[0], unit.vector[0])) { ++ return 0; ++ } ++ ++ /* Vertical source clipping is only supported for NONE and NORMAL repeat */ ++ if (pSrc->common.repeat != PIXMAN_REPEAT_NONE && pSrc->common.repeat != PIXMAN_REPEAT_NORMAL ++ && fbTransformVerifyVerticalClipping(pSrc, height, v.vector[1], unit.vector[1])) { ++ return 0; ++ } ++ ++ if (op == PIXMAN_OP_OVER && pSrc->bits.format == PIXMAN_a8r8g8b8 ++ && (pDst->bits.format == PIXMAN_x8r8g8b8 || pDst->bits.format == PIXMAN_a8r8g8b8)) ++ { ++ if (pSrc->common.filter == PIXMAN_FILTER_NEAREST && pSrc->common.repeat != PIXMAN_REPEAT_NORMAL) { ++ fbCompositeTransformNearestNonrotatedAffineTrivialclipOver_8888x8888( ++ pSrc, pDst, xSrc, ySrc, xDst, yDst, width, height, ++ v.vector[0], v.vector[1], unit.vector[0], unit.vector[1]); ++ return 1; ++ } ++ if (pSrc->common.filter == PIXMAN_FILTER_NEAREST && pSrc->common.repeat == PIXMAN_REPEAT_NORMAL) { ++ fbCompositeTransformNearestNonrotatedAffineTrivialclipRepeatOver_8888x8888( ++ pSrc, pDst, xSrc, ySrc, xDst, yDst, width, height, ++ v.vector[0], v.vector[1], unit.vector[0], unit.vector[1]); ++ return 1; ++ } ++ } ++ ++ if (op == PIXMAN_OP_SRC && (pSrc->bits.format == PIXMAN_x8r8g8b8 || pSrc->bits.format == PIXMAN_a8r8g8b8) ++ && (pDst->bits.format == PIXMAN_x8r8g8b8 || pDst->bits.format == pSrc->bits.format)) ++ { ++ if (pSrc->common.filter == PIXMAN_FILTER_NEAREST && pSrc->common.repeat != PIXMAN_REPEAT_NORMAL) { ++ fbCompositeTransformNearestNonrotatedAffineTrivialclipSrc_8888x8888( ++ pSrc, pDst, xSrc, ySrc, xDst, yDst, width, height, ++ v.vector[0], v.vector[1], unit.vector[0], unit.vector[1]); ++ return 1; ++ } ++ if (pSrc->common.filter == 
PIXMAN_FILTER_NEAREST && pSrc->common.repeat == PIXMAN_REPEAT_NORMAL) { ++ fbCompositeTransformNearestNonrotatedAffineTrivialclipRepeatSrc_8888x8888( ++ pSrc, pDst, xSrc, ySrc, xDst, yDst, width, height, ++ v.vector[0], v.vector[1], unit.vector[0], unit.vector[1]); ++ return 1; ++ } ++ } ++ ++ if (op == PIXMAN_OP_OVER && pSrc->bits.format == PIXMAN_a8r8g8b8 && pDst->bits.format == PIXMAN_r5g6b5) ++ { ++ if (pSrc->common.filter == PIXMAN_FILTER_NEAREST && pSrc->common.repeat != PIXMAN_REPEAT_NORMAL) { ++ fbCompositeTransformNearestNonrotatedAffineTrivialclipOver_8888x0565( ++ pSrc, pDst, xSrc, ySrc, xDst, yDst, width, height, ++ v.vector[0], v.vector[1], unit.vector[0], unit.vector[1]); ++ return 1; ++ } ++ if (pSrc->common.filter == PIXMAN_FILTER_NEAREST && pSrc->common.repeat == PIXMAN_REPEAT_NORMAL) { ++ fbCompositeTransformNearestNonrotatedAffineTrivialclipRepeatOver_8888x0565( ++ pSrc, pDst, xSrc, ySrc, xDst, yDst, width, height, ++ v.vector[0], v.vector[1], unit.vector[0], unit.vector[1]); ++ return 1; ++ } ++ } ++ ++ if (op == PIXMAN_OP_SRC && pSrc->bits.format == PIXMAN_r5g6b5 && pDst->bits.format == PIXMAN_r5g6b5) ++ { ++ if (pSrc->common.filter == PIXMAN_FILTER_NEAREST && pSrc->common.repeat != PIXMAN_REPEAT_NORMAL) { ++ fbCompositeTransformNearestNonrotatedAffineTrivialclipSrc_0565x0565( ++ pSrc, pDst, xSrc, ySrc, xDst, yDst, width, height, ++ v.vector[0], v.vector[1], unit.vector[0], unit.vector[1]); ++ return 1; ++ } ++ if (pSrc->common.filter == PIXMAN_FILTER_NEAREST && pSrc->common.repeat == PIXMAN_REPEAT_NORMAL) { ++ fbCompositeTransformNearestNonrotatedAffineTrivialclipRepeatSrc_0565x0565( ++ pSrc, pDst, xSrc, ySrc, xDst, yDst, width, height, ++ v.vector[0], v.vector[1], unit.vector[0], unit.vector[1]); ++ return 1; ++ } ++ } ++ ++ if (op == PIXMAN_OP_SRC && (pSrc->bits.format == PIXMAN_x8r8g8b8 || pSrc->bits.format == PIXMAN_a8r8g8b8) ++ && pDst->bits.format == PIXMAN_r5g6b5) ++ { ++ if (pSrc->common.filter == PIXMAN_FILTER_NEAREST && pSrc->common.repeat != PIXMAN_REPEAT_NORMAL) { ++ fbCompositeTransformNearestNonrotatedAffineTrivialclipSrc_8888x0565( ++ pSrc, pDst, xSrc, ySrc, xDst, yDst, width, height, ++ v.vector[0], v.vector[1], unit.vector[0], unit.vector[1]); ++ return 1; ++ } ++ if (pSrc->common.filter == PIXMAN_FILTER_NEAREST && pSrc->common.repeat == PIXMAN_REPEAT_NORMAL) { ++ fbCompositeTransformNearestNonrotatedAffineTrivialclipRepeatSrc_8888x0565( ++ pSrc, pDst, xSrc, ySrc, xDst, yDst, width, height, ++ v.vector[0], v.vector[1], unit.vector[0], unit.vector[1]); ++ return 1; ++ } ++ } ++ ++ /* No fastpath scaling implemented for this case */ ++ return 0; ++} ++ + static void + fast_path_composite (pixman_implementation_t *imp, + pixman_op_t op, +@@ -1279,6 +2266,30 @@ fast_path_composite (pixman_implementation_t *imp, + if (src->type == BITS + && src->common.transform + && !mask ++ && !src->common.alpha_map && !dest->common.alpha_map ++ && (src->common.filter == PIXMAN_FILTER_NEAREST) ++ && !src->bits.read_func && !src->bits.write_func ++ && !dest->bits.read_func && !dest->bits.write_func) ++ { ++ /* ensure that the transform matrix only has a scale */ ++ if (src->common.transform->matrix[0][1] == 0 && ++ src->common.transform->matrix[1][0] == 0 && ++ src->common.transform->matrix[2][0] == 0 && ++ src->common.transform->matrix[2][1] == 0 && ++ src->common.transform->matrix[2][2] == pixman_fixed_1 && ++ dest->common.clip_region.data == NULL) ++ { ++ if (fbCompositeTransformNonrotatedAffineTrivialclip (op, src, mask, dest, ++ src_x, src_y, mask_x, mask_y, dest_x, 
dest_y, width, height)) ++ { ++ return; ++ } ++ } ++ } ++ ++ if (src->type == BITS ++ && src->common.transform ++ && !mask + && op == PIXMAN_OP_SRC + && !src->common.alpha_map && !dest->common.alpha_map + && (src->common.filter == PIXMAN_FILTER_NEAREST) diff --git a/recipes/xorg-lib/pixman/over-8888-0565.patch b/recipes/xorg-lib/pixman/over-8888-0565.patch new file mode 100644 index 0000000000..3e27094022 --- /dev/null +++ b/recipes/xorg-lib/pixman/over-8888-0565.patch @@ -0,0 +1,296 @@ +From: Siarhei Siamashka <siarhei.siamashka@nokia.com> +Date: Mon, 27 Jul 2009 04:48:04 +0000 (+0300) +Subject: ARM: NEON optimized version of composite_over_8888_0565 +X-Git-Url: http://siarhei.siamashka.name/gitweb/?p=pixman.git;a=commitdiff_plain;h=17d8ab82858511f212dfb30c347255393eb12b0c + +ARM: NEON optimized version of composite_over_8888_0565 +--- + +diff --git a/pixman/pixman-arm-neon.c b/pixman/pixman-arm-neon.c +index 9404c70..f1dcf1f 100644 +--- a/pixman/pixman-arm-neon.c ++++ b/pixman/pixman-arm-neon.c +@@ -1447,6 +1447,274 @@ neon_composite_src_16_16 (pixman_implementation_t * impl, + } + } + ++static inline void ++neon_composite_over_8888_0565_internal (uint32_t *src, ++ uint16_t *dst, ++ int32_t w, ++ int32_t h, ++ int32_t src_stride, ++ int32_t dst_stride) ++{ ++ int32_t dst_newline_delta = (dst_stride - w) * 2; ++ int32_t src_newline_delta = (src_stride - w) * 4; ++ asm volatile ( ++ ++ ".macro process_pixblock_head size\n" ++ /* load pixel data from memory */ ++ " .if \\size == 8\n" ++ " vld1.32 {d0, d1, d2, d3}, [%[src]]!\n" ++ " vld1.16 {d4, d5}, [%[dst_r]]!\n" ++ " .elseif \\size == 4\n" ++ " vld1.32 {d0, d1}, [%[src]]!\n" ++ " vld1.16 {d4}, [%[dst_r]]!\n" ++ " .elseif \\size == 2\n" ++ " vld1.32 {d0}, [%[src]]!\n" ++ " vld1.16 {d4[0]}, [%[dst_r]]!\n" ++ " vld1.16 {d4[1]}, [%[dst_r]]!\n" ++ " .elseif \\size == 1\n" ++ " vld1.32 {d0[0]}, [%[src]]!\n" ++ " vld1.16 {d4[0]}, [%[dst_r]]!\n" ++ " .endif\n" ++ /* deinterleave and convert both source and destination ++ to "planar" 8-bit format */ ++ " vshrn.u16 d16, q2, #8\n" ++ " vuzp.8 d0, d1\n" ++ " vshrn.u16 d17, q2, #3\n" ++ " vuzp.8 d2, d3\n" ++ " vsli.u16 q2, q2, #5\n" ++ " vuzp.8 d1, d3\n" ++ " vsri.u8 d16, d16, #5\n" ++ " vuzp.8 d0, d2\n" ++ " vmvn.8 d3, d3\n" ++ " vsri.u8 d17, d17, #6\n" ++ " vshrn.u16 d18, q2, #2\n" ++ /* source: d0 - blue, d1 - green, d2 - red, d3 - alpha */ ++ /* destination: d16 - red, d17 - green, d18 - blue */ ++ /* now do alpha blending */ ++ " vmull.u8 q10, d3, d16\n" ++ "pld [%[src], #128]\n" ++ " vmull.u8 q11, d3, d17\n" ++ "pld [%[dst_r], #64]\n" ++ " vmull.u8 q12, d3, d18\n" ++ " vrshr.u16 q13, q10, #8\n" ++ " vrshr.u16 q8, q11, #8\n" ++ " vrshr.u16 q9, q12, #8\n" ++ " vraddhn.u16 d20, q10, q13\n" ++ " vraddhn.u16 d21, q11, q8\n" ++ " vraddhn.u16 d22, q12, q9\n" ++ ".endm\n" ++ ++ ".macro process_pixblock_tail size\n" ++ /* result is ready in d28, d29, d30 (R, G, B) */ ++ " vqadd.u8 d28, d2, d20\n" ++ " vqadd.u8 d29, d1, d21\n" ++ " vqadd.u8 d30, d0, d22\n" ++ /* convert it to r5g6b5 */ ++ " vshll.u8 q3, d28, #8\n" ++ " vshll.u8 q14, d29, #8\n" ++ " vshll.u8 q15, d30, #8\n" ++ " vsri.u16 q3, q14, #5\n" ++ " vsri.u16 q3, q15, #11\n" ++ /* store pixel data to memory */ ++ " .if \\size == 8\n" ++ " vst1.16 {d6, d7}, [%[dst_w], :128]!\n" ++ " .elseif \\size == 4\n" ++ " vst1.16 {d6}, [%[dst_w]]!\n" ++ " .elseif \\size == 2\n" ++ " vst1.16 {d6[0]}, [%[dst_w]]!\n" ++ " vst1.16 {d6[1]}, [%[dst_w]]!\n" ++ " .elseif \\size == 1\n" ++ " vst1.16 {d6[0]}, [%[dst_w]]!\n" ++ " .endif\n" ++ ".endm\n" ++ ++ /* "tail" of 
the previous block and "head" of the next block ++ are merged and interleaved for better instructions scheduling */ ++ ".macro process_pixblock_tail_head_8\n" ++ " vqadd.u8 d28, d2, d20\n" ++ " vld1.16 {d4, d5}, [%[dst_r], :128]!\n" ++ " vqadd.u8 d29, d1, d21\n" /* TODO: try to join these into a */ ++ " vqadd.u8 d30, d0, d22\n" /* single 128-bit operation */ ++ " vshrn.u16 d16, q2, #8\n" ++ " vld1.32 {d0, d1, d2, d3}, [%[src]]!\n" /* TODO: maybe split */ ++ " vshrn.u16 d17, q2, #3\n" ++ " vsli.u16 q2, q2, #5\n" ++ " vuzp.8 d0, d1\n" ++ " vshll.u8 q3, d28, #8\n" ++ " vuzp.8 d2, d3\n" ++ " vshll.u8 q14, d29, #8\n" ++ " vuzp.8 d1, d3\n" ++ " vsri.u8 d16, d16, #5\n" ++ " vuzp.8 d0, d2\n" ++ " vmvn.8 d3, d3\n" ++ " vsri.u8 d17, d17, #6\n" ++ " vshrn.u16 d18, q2, #2\n" ++ " vmull.u8 q10, d3, d16\n" ++ "pld [%[src], #128]\n" ++ " vmull.u8 q11, d3, d17\n" ++ "pld [%[dst_r], #64]\n" ++ " vmull.u8 q12, d3, d18\n" ++ " vsri.u16 d6, d28, #5\n" ++ " vsri.u16 d7, d29, #5\n" ++ " vshll.u8 q15, d30, #8\n" ++ " vrshr.u16 q13, q10, #8\n" ++ " vrshr.u16 q8, q11, #8\n" ++ " vrshr.u16 q9, q12, #8\n" ++ " vsri.u16 d6, d30, #11\n" ++ " vsri.u16 d7, d31, #11\n" ++ " vraddhn.u16 d20, q10, q13\n" ++ " vraddhn.u16 d21, q11, q8\n" ++ " vraddhn.u16 d22, q12, q9\n" ++ " vst1.16 {d6, d7}, [%[dst_w], :128]!\n" ++ ".endm\n" ++ ++ "subs %[h], %[h], #1\n" ++ "blt 9f\n" ++ "0:\n" ++ "cmp %[w], #8\n" ++ "blt 8f\n" ++ ++ /* ensure 16 byte alignment of the destination buffer */ ++ "tst %[dst_r], #0xF\n" ++ "beq 2f\n" ++ "tst %[dst_r], #2\n" ++ "beq 1f\n" ++ "vld1.32 {d3[0]}, [%[src]]!\n" ++ "vld1.16 {d5[2]}, [%[dst_r]]!\n" ++ "sub %[w], %[w], #1\n" ++ "1:\n" ++ "tst %[dst_r], #4\n" ++ "beq 1f\n" ++ "vld1.32 {d2}, [%[src]]!\n" ++ "vld1.16 {d5[0]}, [%[dst_r]]!\n" ++ "vld1.16 {d5[1]}, [%[dst_r]]!\n" ++ "sub %[w], %[w], #2\n" ++ "1:\n" ++ "tst %[dst_r], #8\n" ++ "beq 1f\n" ++ "vld1.32 {d0, d1}, [%[src]]!\n" ++ "vld1.16 {d4}, [%[dst_r]]!\n" ++ "sub %[w], %[w], #4\n" ++ "1:\n" ++ "process_pixblock_head -1\n" ++ "process_pixblock_tail -1\n" ++ "tst %[dst_w], #2\n" ++ "beq 1f\n" ++ "vst1.16 {d7[2]}, [%[dst_w]]!\n" ++ "1:\n" ++ "tst %[dst_w], #4\n" ++ "beq 1f\n" ++ "vst1.16 {d7[0]}, [%[dst_w]]!\n" ++ "vst1.16 {d7[1]}, [%[dst_w]]!\n" ++ "1:\n" ++ "tst %[dst_w], #8\n" ++ "beq 2f\n" ++ "vst1.16 {d6}, [%[dst_w]]!\n" ++ "2:\n" ++ ++ "subs %[w], %[w], #8\n" ++ "blt 8f\n" ++ "process_pixblock_head 8\n" ++ "subs %[w], %[w], #8\n" ++ "blt 2f\n" ++ "1:\n" /* innermost pipelined loop */ ++ "process_pixblock_tail_head_8\n" ++ "subs %[w], %[w], #8\n" ++ "bge 1b\n" ++ "2:\n" ++ "process_pixblock_tail 8\n" ++ ++ "8:\n" ++ /* process up to 7 remaining pixels */ ++ "tst %[w], #7\n" ++ "beq 2f\n" ++ "tst %[w], #4\n" ++ "beq 1f\n" ++ "vld1.32 {d0, d1}, [%[src]]!\n" ++ "vld1.16 {d4}, [%[dst_r]]!\n" ++ "1:\n" ++ "tst %[w], #2\n" ++ "beq 1f\n" ++ "vld1.32 {d2}, [%[src]]!\n" ++ "vld1.16 {d5[0]}, [%[dst_r]]!\n" ++ "vld1.16 {d5[1]}, [%[dst_r]]!\n" ++ "1:\n" ++ "tst %[w], #1\n" ++ "beq 1f\n" ++ "vld1.32 {d3[0]}, [%[src]]!\n" ++ "vld1.16 {d5[2]}, [%[dst_r]]!\n" ++ "1:\n" ++ ++ "process_pixblock_head -1\n" ++ "process_pixblock_tail -1\n" ++ ++ "tst %[w], #4\n" ++ "beq 1f\n" ++ "vst1.16 {d6}, [%[dst_w]]!\n" ++ "1:\n" ++ "tst %[w], #2\n" ++ "beq 1f\n" ++ "vst1.16 {d7[0]}, [%[dst_w]]!\n" ++ "vst1.16 {d7[1]}, [%[dst_w]]!\n" ++ "1:\n" ++ "tst %[w], #1\n" ++ "beq 2f\n" ++ "vst1.16 {d7[2]}, [%[dst_w]]!\n" ++ "2:\n" ++ ++ "add %[src], %[src], %[src_newline_delta]\n" ++ "add %[dst_r], %[dst_r], %[dst_newline_delta]\n" ++ "add %[dst_w], %[dst_w], %[dst_newline_delta]\n" ++ 
"mov %[w], %[orig_w]\n" ++ "subs %[h], %[h], #1\n" ++ "bge 0b\n" ++ "9:\n" ++ ".purgem process_pixblock_head\n" ++ ".purgem process_pixblock_tail\n" ++ ".purgem process_pixblock_tail_head_8\n" ++ ++ : [src] "+&r" (src), [dst_r] "+&r" (dst), [dst_w] "+&r" (dst), ++ [w] "+&r" (w), [h] "+&r" (h) ++ : [dst_newline_delta] "r" (dst_newline_delta), ++ [src_newline_delta] "r" (src_newline_delta), [orig_w] "r" (w) ++ : "cc", "memory", ++ "d0", "d1", "d2", "d3", "d4", "d5", "d6", "d7", ++ /* "d8", "d9", "d10", "d11", "d12", "d13", "d14", "d15", */ ++ "d16", "d17", "d18", "d19", "d20", "d21", "d22", "d23", ++ "d24", "d25", "d26", "d27", "d28", "d29", "d30", "d31" ++ ); ++} ++ ++static void ++neon_composite_over_8888_0565 (pixman_implementation_t *imp, ++ pixman_op_t op, ++ pixman_image_t * src_image, ++ pixman_image_t * mask_image, ++ pixman_image_t * dst_image, ++ int32_t src_x, ++ int32_t src_y, ++ int32_t mask_x, ++ int32_t mask_y, ++ int32_t dest_x, ++ int32_t dest_y, ++ int32_t width, ++ int32_t height) ++{ ++ uint16_t *dst_line; ++ uint32_t *src_line; ++ int32_t dst_stride, src_stride; ++ ++ PIXMAN_IMAGE_GET_LINE (src_image, src_x, src_y, uint32_t, src_stride, src_line, 1); ++ PIXMAN_IMAGE_GET_LINE (dst_image, dest_x, dest_y, uint16_t, dst_stride, dst_line, 1); ++ ++ neon_composite_over_8888_0565_internal (src_line, ++ dst_line, ++ width, ++ height, ++ src_stride, ++ dst_stride); ++} ++ + #endif /* USE_GCC_INLINE_ASM */ + + static void +@@ -1908,6 +2176,8 @@ static const pixman_fast_path_t arm_neon_fast_path_array[] = + #ifdef USE_GCC_INLINE_ASM + { PIXMAN_OP_SRC, PIXMAN_r5g6b5, PIXMAN_null, PIXMAN_r5g6b5, neon_composite_src_16_16, 0 }, + { PIXMAN_OP_SRC, PIXMAN_b5g6r5, PIXMAN_null, PIXMAN_b5g6r5, neon_composite_src_16_16, 0 }, ++ { PIXMAN_OP_OVER, PIXMAN_a8r8g8b8, PIXMAN_null, PIXMAN_r5g6b5, neon_composite_over_8888_0565, 0 }, ++ { PIXMAN_OP_OVER, PIXMAN_a8b8g8r8, PIXMAN_null, PIXMAN_b5g6r5, neon_composite_over_8888_0565, 0 }, + #endif + { PIXMAN_OP_OVER, PIXMAN_a8r8g8b8, PIXMAN_null, PIXMAN_a8r8g8b8, neon_composite_over_8888_8888, 0 }, + { PIXMAN_OP_OVER, PIXMAN_a8r8g8b8, PIXMAN_null, PIXMAN_x8r8g8b8, neon_composite_over_8888_8888, 0 }, diff --git a/recipes/xorg-lib/pixman/pixman-28986.patch b/recipes/xorg-lib/pixman/pixman-28986.patch new file mode 100644 index 0000000000..f5ba4c302e --- /dev/null +++ b/recipes/xorg-lib/pixman/pixman-28986.patch @@ -0,0 +1,32 @@ +From 7b7860d61fb1526acdf010dd8fd644bbf1396b9e Mon Sep 17 00:00:00 2001 +From: Siarhei Siamashka <siarhei.siamashka@nokia.com> +Date: Fri, 28 Aug 2009 22:34:21 +0300 +Subject: [PATCH] ARM: workaround for gcc bug in vshll_n_u8 intrinsic + +Some versions of gcc (cs2009q1, 4.4.1) incorrectly reject +shift operand having value >= 8, claiming that it is out of +range. So inline assembly is used as a workaround. 
+--- + pixman/pixman-arm-neon.c | 6 ++++++ + 1 files changed, 6 insertions(+), 0 deletions(-) + +diff --git a/pixman/pixman-arm-neon.c b/pixman/pixman-arm-neon.c +index 4125d1b..3e7f566 100644 +--- a/pixman/pixman-arm-neon.c ++++ b/pixman/pixman-arm-neon.c +@@ -64,6 +64,12 @@ unpack0565 (uint16x8_t rgb) + return res; + } + ++#ifdef USE_GCC_INLINE_ASM ++/* Some versions of gcc have problems with vshll_n_u8 intrinsic (Bug 23576) */ ++#define vshll_n_u8(a, n) ({ uint16x8_t r; \ ++ asm ("vshll.u8 %q0, %P1, %2\n" : "=w" (r) : "w" (a), "i" (n)); r; }) ++#endif ++ + static force_inline uint16x8_t + pack0565 (uint8x8x4_t s) + { +-- +1.5.4.3 + diff --git a/recipes/xorg-lib/pixman/remove-broken.patch b/recipes/xorg-lib/pixman/remove-broken.patch new file mode 100644 index 0000000000..fd025b4bbd --- /dev/null +++ b/recipes/xorg-lib/pixman/remove-broken.patch @@ -0,0 +1,826 @@ +From: Siarhei Siamashka <siarhei.siamashka@nokia.com> +Date: Sun, 26 Jul 2009 22:21:26 +0000 (+0300) +Subject: ARM: Removal of unused/broken NEON code +X-Git-Url: http://siarhei.siamashka.name/gitweb/?p=pixman.git;a=commitdiff_plain;h=7ef2322eefcccc28a2d45c0da22c0fee88b8f464 + +ARM: Removal of unused/broken NEON code +--- + +diff --git a/pixman/pixman-arm-neon.c b/pixman/pixman-arm-neon.c +index 4125d1b..9404c70 100644 +--- a/pixman/pixman-arm-neon.c ++++ b/pixman/pixman-arm-neon.c +@@ -1895,710 +1895,6 @@ pixman_fill_neon (uint32_t *bits, + #endif + } + +-/* TODO: is there a more generic way of doing this being introduced? */ +-#define NEON_SCANLINE_BUFFER_PIXELS (1024) +- +-static inline void +-neon_quadword_copy (void * dst, +- void * src, +- uint32_t count, /* of quadwords */ +- uint32_t trailer_count /* of bytes */) +-{ +- uint8_t *t_dst = dst, *t_src = src; +- +- /* Uses aligned multi-register loads to maximise read bandwidth +- * on uncached memory such as framebuffers +- * The accesses do not have the aligned qualifiers, so that the copy +- * may convert between aligned-uncached and unaligned-cached memory. +- * It is assumed that the CPU can infer alignedness from the address. +- */ +- +-#ifdef USE_GCC_INLINE_ASM +- +- asm volatile ( +- " cmp %[count], #8 \n" +- " blt 1f @ skip oversized fragments \n" +- "0: @ start with eight quadwords at a time \n" +- " sub %[count], %[count], #8 \n" +- " vld1.8 {d16, d17, d18, d19}, [%[src]]! \n" +- " vld1.8 {d20, d21, d22, d23}, [%[src]]! \n" +- " vld1.8 {d24, d25, d26, d27}, [%[src]]! \n" +- " vld1.8 {d28, d29, d30, d31}, [%[src]]! \n" +- " cmp %[count], #8 \n" +- " vst1.8 {d16, d17, d18, d19}, [%[dst]]! \n" +- " vst1.8 {d20, d21, d22, d23}, [%[dst]]! \n" +- " vst1.8 {d24, d25, d26, d27}, [%[dst]]! \n" +- " vst1.8 {d28, d29, d30, d31}, [%[dst]]! \n" +- " bge 0b \n" +- "1: @ four quadwords \n" +- " tst %[count], #4 \n" +- " beq 2f @ skip oversized fragment \n" +- " vld1.8 {d16, d17, d18, d19}, [%[src]]! \n" +- " vld1.8 {d20, d21, d22, d23}, [%[src]]! \n" +- " vst1.8 {d16, d17, d18, d19}, [%[dst]]! \n" +- " vst1.8 {d20, d21, d22, d23}, [%[dst]]! \n" +- "2: @ two quadwords \n" +- " tst %[count], #2 \n" +- " beq 3f @ skip oversized fragment \n" +- " vld1.8 {d16, d17, d18, d19}, [%[src]]! \n" +- " vst1.8 {d16, d17, d18, d19}, [%[dst]]! \n" +- "3: @ one quadword \n" +- " tst %[count], #1 \n" +- " beq 4f @ skip oversized fragment \n" +- " vld1.8 {d16, d17}, [%[src]]! \n" +- " vst1.8 {d16, d17}, [%[dst]]! 
\n" +- "4: @ end \n" +- +- /* Clobbered input registers marked as input/outputs */ +- : [dst] "+r" (t_dst), [src] "+r" (t_src), [count] "+r" (count) +- +- /* No unclobbered inputs */ +- : +- +- /* Clobbered vector registers */ +- : "d16", "d17", "d18", "d19", "d20", "d21", "d22", "d23", "d24", "d25", +- "d26", "d27", "d28", "d29", "d30", "d31", "cc", "memory"); +- +-#else +- +- while (count >= 8) +- { +- uint8x16x4_t t1 = vld4q_u8 (t_src); +- uint8x16x4_t t2 = vld4q_u8 (t_src + sizeof(uint8x16x4_t)); +- +- t_src += sizeof(uint8x16x4_t) * 2; +- vst4q_u8 (t_dst, t1); +- vst4q_u8 (t_dst + sizeof(uint8x16x4_t), t2); +- t_dst += sizeof(uint8x16x4_t) * 2; +- count -= 8; +- } +- +- if (count & 4) +- { +- uint8x16x4_t t1 = vld4q_u8 (t_src); +- +- t_src += sizeof(uint8x16x4_t); +- vst4q_u8 (t_dst, t1); +- t_dst += sizeof(uint8x16x4_t); +- } +- +- if (count & 2) +- { +- uint8x8x4_t t1 = vld4_u8 (t_src); +- +- t_src += sizeof(uint8x8x4_t); +- vst4_u8 (t_dst, t1); +- t_dst += sizeof(uint8x8x4_t); +- } +- +- if (count & 1) +- { +- uint8x16_t t1 = vld1q_u8 (t_src); +- +- t_src += sizeof(uint8x16_t); +- vst1q_u8 (t_dst, t1); +- t_dst += sizeof(uint8x16_t); +- } +- +-#endif /* !USE_GCC_INLINE_ASM */ +- +- if (trailer_count) +- { +- if (trailer_count & 8) +- { +- uint8x8_t t1 = vld1_u8 (t_src); +- +- t_src += sizeof(uint8x8_t); +- vst1_u8 (t_dst, t1); +- t_dst += sizeof(uint8x8_t); +- } +- +- if (trailer_count & 4) +- { +- *((uint32_t*) t_dst) = *((uint32_t*) t_src); +- +- t_dst += 4; +- t_src += 4; +- } +- +- if (trailer_count & 2) +- { +- *((uint16_t*) t_dst) = *((uint16_t*) t_src); +- +- t_dst += 2; +- t_src += 2; +- } +- +- if (trailer_count & 1) +- { +- *t_dst++ = *t_src++; +- } +- } +-} +- +-static inline void +-solid_over_565_8_pix_neon (uint32_t glyph_colour, +- uint16_t *dest, +- uint8_t * in_mask, +- uint32_t dest_stride, /* bytes, not elements */ +- uint32_t mask_stride, +- uint32_t count /* 8-pixel groups */) +-{ +- /* Inner loop of glyph blitter (solid colour, alpha mask) */ +- +-#ifdef USE_GCC_INLINE_ASM +- +- asm volatile ( +- " vld4.8 {d20[], d21[], d22[], d23[]}, [%[glyph_colour]] @ splat solid colour components \n" +- "0: @ loop \n" +- " vld1.16 {d0, d1}, [%[dest]] @ load first pixels from framebuffer \n" +- " vld1.8 {d17}, [%[in_mask]] @ load alpha mask of glyph \n" +- " vmull.u8 q9, d17, d23 @ apply glyph colour alpha to mask \n" +- " vshrn.u16 d17, q9, #8 @ reformat it to match original mask \n" +- " vmvn d18, d17 @ we need the inverse mask for the background \n" +- " vsli.u16 q3, q0, #5 @ duplicate framebuffer blue bits \n" +- " vshrn.u16 d2, q0, #8 @ unpack red from framebuffer pixels \n" +- " vshrn.u16 d4, q0, #3 @ unpack green \n" +- " vsri.u8 d2, d2, #5 @ duplicate red bits (extend 5 to 8) \n" +- " vshrn.u16 d6, q3, #2 @ unpack extended blue (truncate 10 to 8) \n" +- " vsri.u8 d4, d4, #6 @ duplicate green bits (extend 6 to 8) \n" +- " vmull.u8 q1, d2, d18 @ apply inverse mask to background red... \n" +- " vmull.u8 q2, d4, d18 @ ...green... \n" +- " vmull.u8 q3, d6, d18 @ ...blue \n" +- " subs %[count], %[count], #1 @ decrement/test loop counter \n" +- " vmlal.u8 q1, d17, d22 @ add masked foreground red... \n" +- " vmlal.u8 q2, d17, d21 @ ...green... 
\n" +- " vmlal.u8 q3, d17, d20 @ ...blue \n" +- " add %[in_mask], %[in_mask], %[mask_stride] @ advance mask pointer, while we wait \n" +- " vsri.16 q1, q2, #5 @ pack green behind red \n" +- " vsri.16 q1, q3, #11 @ pack blue into pixels \n" +- " vst1.16 {d2, d3}, [%[dest]] @ store composited pixels \n" +- " add %[dest], %[dest], %[dest_stride] @ advance framebuffer pointer \n" +- " bne 0b @ next please \n" +- +- /* Clobbered registers marked as input/outputs */ +- : [dest] "+r" (dest), [in_mask] "+r" (in_mask), [count] "+r" (count) +- +- /* Inputs */ +- : [dest_stride] "r" (dest_stride), [mask_stride] "r" (mask_stride), [glyph_colour] "r" (&glyph_colour) +- +- /* Clobbers, including the inputs we modify, and potentially lots of memory */ +- : "d0", "d1", "d2", "d3", "d4", "d5", "d6", "d7", "d17", "d18", "d19", +- "d20", "d21", "d22", "d23", "d24", "d25", "cc", "memory" +- ); +- +-#else +- +- uint8x8x4_t solid_colour = vld4_dup_u8 ((uint8_t*) &glyph_colour); +- +- while (count--) +- { +- uint16x8_t pixels = vld1q_u16 (dest); +- uint8x8_t mask = vshrn_n_u16 (vmull_u8 (solid_colour.val[3], vld1_u8 (in_mask)), 8); +- uint8x8_t mask_image = vmvn_u8 (mask); +- +- uint8x8_t t_red = vshrn_n_u16 (pixels, 8); +- uint8x8_t t_green = vshrn_n_u16 (pixels, 3); +- uint8x8_t t_blue = vshrn_n_u16 (vsli_n_u8 (pixels, pixels, 5), 2); +- +- uint16x8_t s_red = vmull_u8 (vsri_n_u8 (t_red, t_red, 5), mask_image); +- uint16x8_t s_green = vmull_u8 (vsri_n_u8 (t_green, t_green, 6), mask_image); +- uint16x8_t s_blue = vmull_u8 (t_blue, mask_image); +- +- s_red = vmlal (s_red, mask, solid_colour.val[2]); +- s_green = vmlal (s_green, mask, solid_colour.val[1]); +- s_blue = vmlal (s_blue, mask, solid_colour.val[0]); +- +- pixels = vsri_n_u16 (s_red, s_green, 5); +- pixels = vsri_n_u16 (pixels, s_blue, 11); +- vst1q_u16 (dest, pixels); +- +- dest += dest_stride; +- mask += mask_stride; +- } +- +-#endif +-} +- +-#if 0 /* this is broken currently */ +-static void +-neon_composite_over_n_8_0565 (pixman_implementation_t * impl, +- pixman_op_t op, +- pixman_image_t * src_image, +- pixman_image_t * mask_image, +- pixman_image_t * dst_image, +- int32_t src_x, +- int32_t src_y, +- int32_t mask_x, +- int32_t mask_y, +- int32_t dest_x, +- int32_t dest_y, +- int32_t width, +- int32_t height) +-{ +- uint32_t src, srca; +- uint16_t *dst_line, *aligned_line; +- uint8_t *mask_line; +- uint32_t dst_stride, mask_stride; +- uint32_t kernel_count, copy_count, copy_tail; +- uint8_t kernel_offset, copy_offset; +- +- src = _pixman_image_get_solid (src_image, dst_image->bits.format); +- +- /* bail out if fully transparent or degenerate */ +- srca = src >> 24; +- if (src == 0) +- return; +- +- if (width == 0 || height == 0) +- return; +- +- if (width > NEON_SCANLINE_BUFFER_PIXELS) +- { +- /* split the blit, so we can use a fixed-size scanline buffer +- * TODO: there must be a more elegant way of doing this. +- */ +- int x; +- for (x = 0; x < width; x += NEON_SCANLINE_BUFFER_PIXELS) +- { +- neon_composite_over_n_8_0565 ( +- impl, op, +- src_image, mask_image, dst_image, +- src_x + x, src_y, mask_x + x, mask_y, dest_x + x, dest_y, +- (x + NEON_SCANLINE_BUFFER_PIXELS > width) ? 
width - x : NEON_SCANLINE_BUFFER_PIXELS, height); +- } +- +- return; +- } +- +- PIXMAN_IMAGE_GET_LINE (dst_image, dest_x, dest_y, uint16_t, dst_stride, dst_line, 1); +- PIXMAN_IMAGE_GET_LINE (mask_image, mask_x, mask_y, uint8_t, mask_stride, mask_line, 1); +- +- /* keep within minimum number of aligned quadwords on width +- * while also keeping the minimum number of columns to process +- */ +- { +- unsigned long aligned_left = (unsigned long)(dst_line) & ~0xF; +- unsigned long aligned_right = (((unsigned long)(dst_line + width)) + 0xF) & ~0xF; +- unsigned long ceiling_length = (((unsigned long) width) * sizeof(*dst_line) + 0xF) & ~0xF; +- +- /* the fast copy should be quadword aligned */ +- copy_offset = dst_line - ((uint16_t*) aligned_left); +- aligned_line = dst_line - copy_offset; +- copy_count = (uint32_t) ((aligned_right - aligned_left) >> 4); +- copy_tail = 0; +- +- if (aligned_right - aligned_left > ceiling_length) +- { +- /* unaligned routine is tightest */ +- kernel_count = (uint32_t) (ceiling_length >> 4); +- kernel_offset = copy_offset; +- } +- else +- { +- /* aligned routine is equally tight, so it is safer to align */ +- kernel_count = copy_count; +- kernel_offset = 0; +- } +- +- /* We should avoid reading beyond scanline ends for safety */ +- if (aligned_line < (dst_line - dest_x) || +- (aligned_line + (copy_count * 16 / sizeof(*dst_line))) > ((dst_line - dest_x) + dst_image->bits.width)) +- { +- /* switch to precise read */ +- copy_offset = kernel_offset = 0; +- aligned_line = dst_line; +- kernel_count = (uint32_t) (ceiling_length >> 4); +- copy_count = (width * sizeof(*dst_line)) >> 4; +- copy_tail = (width * sizeof(*dst_line)) & 0xF; +- } +- } +- +- { +- uint16_t scan_line[NEON_SCANLINE_BUFFER_PIXELS + 8]; /* deliberately not initialised */ +- uint8_t glyph_line[NEON_SCANLINE_BUFFER_PIXELS + 8]; +- int y = height; +- +- /* row-major order */ +- /* left edge, middle block, right edge */ +- for ( ; y--; mask_line += mask_stride, aligned_line += dst_stride, dst_line += dst_stride) +- { +- /* We don't want to overrun the edges of the glyph, +- * so realign the edge data into known buffers +- */ +- neon_quadword_copy (glyph_line + copy_offset, mask_line, width >> 4, width & 0xF); +- +- /* Uncached framebuffer access is really, really slow +- * if we do it piecemeal. It should be much faster if we +- * grab it all at once. One scanline should easily fit in +- * L1 cache, so this should not waste RAM bandwidth. 
+- */ +- neon_quadword_copy (scan_line, aligned_line, copy_count, copy_tail); +- +- /* Apply the actual filter */ +- solid_over_565_8_pix_neon ( +- src, scan_line + kernel_offset, +- glyph_line + kernel_offset, 8 * sizeof(*dst_line), +- 8, kernel_count); +- +- /* Copy the modified scanline back */ +- neon_quadword_copy (dst_line, scan_line + copy_offset, +- width >> 3, (width & 7) * 2); +- } +- } +-} +-#endif +- +-#ifdef USE_GCC_INLINE_ASM +- +-static inline void +-plain_over_565_8_pix_neon (uint32_t colour, +- uint16_t *dest, +- uint32_t dest_stride, /* bytes, not elements */ +- uint32_t count /* 8-pixel groups */) +-{ +- /* Inner loop for plain translucent rects +- * (solid colour without alpha mask) +- */ +- asm volatile ( +- " vld4.8 {d20[], d21[], d22[], d23[]}, [%[colour]] @ solid colour load/splat \n" +- " vmull.u8 q12, d23, d22 @ premultiply alpha red \n" +- " vmull.u8 q13, d23, d21 @ premultiply alpha green \n" +- " vmull.u8 q14, d23, d20 @ premultiply alpha blue \n" +- " vmvn d18, d23 @ inverse alpha for background \n" +- "0: @ loop\n" +- " vld1.16 {d0, d1}, [%[dest]] @ load first pixels from framebuffer \n" +- " vshrn.u16 d2, q0, #8 @ unpack red from framebuffer pixels \n" +- " vshrn.u16 d4, q0, #3 @ unpack green \n" +- " vsli.u16 q3, q0, #5 @ duplicate framebuffer blue bits \n" +- " vsri.u8 d2, d2, #5 @ duplicate red bits (extend 5 to 8) \n" +- " vsri.u8 d4, d4, #6 @ duplicate green bits (extend 6 to 8) \n" +- " vshrn.u16 d6, q3, #2 @ unpack extended blue (truncate 10 to 8) \n" +- " vmov q0, q12 @ retrieve foreground red \n" +- " vmlal.u8 q0, d2, d18 @ blend red - my kingdom for a four-operand MLA \n" +- " vmov q1, q13 @ retrieve foreground green \n" +- " vmlal.u8 q1, d4, d18 @ blend green \n" +- " vmov q2, q14 @ retrieve foreground blue \n" +- " vmlal.u8 q2, d6, d18 @ blend blue \n" +- " subs %[count], %[count], #1 @ decrement/test loop counter \n" +- " vsri.16 q0, q1, #5 @ pack green behind red \n" +- " vsri.16 q0, q2, #11 @ pack blue into pixels \n" +- " vst1.16 {d0, d1}, [%[dest]] @ store composited pixels \n" +- " add %[dest], %[dest], %[dest_stride] @ advance framebuffer pointer \n" +- " bne 0b @ next please \n" +- +- /* Clobbered registers marked as input/outputs */ +- : [dest] "+r" (dest), [count] "+r" (count) +- +- /* Inputs */ +- : [dest_stride] "r" (dest_stride), [colour] "r" (&colour) +- +- /* Clobbers, including the inputs we modify, and +- * potentially lots of memory +- */ +- : "d0", "d1", "d2", "d3", "d4", "d5", "d6", "d7", "d18", "d19", +- "d20", "d21", "d22", "d23", "d24", "d25", "d26", "d27", "d28", "d29", +- "cc", "memory" +- ); +-} +- +-static void +-neon_composite_over_n_0565 (pixman_implementation_t * impl, +- pixman_op_t op, +- pixman_image_t * src_image, +- pixman_image_t * mask_image, +- pixman_image_t * dst_image, +- int32_t src_x, +- int32_t src_y, +- int32_t mask_x, +- int32_t mask_y, +- int32_t dest_x, +- int32_t dest_y, +- int32_t width, +- int32_t height) +-{ +- uint32_t src, srca; +- uint16_t *dst_line, *aligned_line; +- uint32_t dst_stride; +- uint32_t kernel_count, copy_count, copy_tail; +- uint8_t kernel_offset, copy_offset; +- +- src = _pixman_image_get_solid (src_image, dst_image->bits.format); +- +- /* bail out if fully transparent */ +- srca = src >> 24; +- if (src == 0) +- return; +- +- if (width == 0 || height == 0) +- return; +- +- if (width > NEON_SCANLINE_BUFFER_PIXELS) +- { +- /* split the blit, so we can use a fixed-size scanline buffer * +- * TODO: there must be a more elegant way of doing this. 
+- */
+- int x;
+-
+- for (x = 0; x < width; x += NEON_SCANLINE_BUFFER_PIXELS)
+- {
+- neon_composite_over_n_0565 (
+- impl, op,
+- src_image, mask_image, dst_image,
+- src_x + x, src_y, mask_x + x, mask_y, dest_x + x, dest_y,
+- (x + NEON_SCANLINE_BUFFER_PIXELS > width) ? width - x : NEON_SCANLINE_BUFFER_PIXELS, height);
+- }
+- return;
+- }
+-
+- PIXMAN_IMAGE_GET_LINE (dst_image, dest_x, dest_y, uint16_t, dst_stride, dst_line, 1);
+-
+- /* keep within minimum number of aligned quadwords on width
+- * while also keeping the minimum number of columns to process
+- */
+- {
+- unsigned long aligned_left = (unsigned long)(dst_line) & ~0xF;
+- unsigned long aligned_right = (((unsigned long)(dst_line + width)) + 0xF) & ~0xF;
+- unsigned long ceiling_length = (((unsigned long) width) * sizeof(*dst_line) + 0xF) & ~0xF;
+-
+- /* the fast copy should be quadword aligned */
+- copy_offset = dst_line - ((uint16_t*) aligned_left);
+- aligned_line = dst_line - copy_offset;
+- copy_count = (uint32_t) ((aligned_right - aligned_left) >> 4);
+- copy_tail = 0;
+-
+- if (aligned_right - aligned_left > ceiling_length)
+- {
+- /* unaligned routine is tightest */
+- kernel_count = (uint32_t) (ceiling_length >> 4);
+- kernel_offset = copy_offset;
+- }
+- else
+- {
+- /* aligned routine is equally tight, so it is safer to align */
+- kernel_count = copy_count;
+- kernel_offset = 0;
+- }
+-
+- /* We should avoid reading beyond scanline ends for safety */
+- if (aligned_line < (dst_line - dest_x) ||
+- (aligned_line + (copy_count * 16 / sizeof(*dst_line))) > ((dst_line - dest_x) + dst_image->bits.width))
+- {
+- /* switch to precise read */
+- copy_offset = kernel_offset = 0;
+- aligned_line = dst_line;
+- kernel_count = (uint32_t) (ceiling_length >> 4);
+- copy_count = (width * sizeof(*dst_line)) >> 4;
+- copy_tail = (width * sizeof(*dst_line)) & 0xF;
+- }
+- }
+-
+- {
+- uint16_t scan_line[NEON_SCANLINE_BUFFER_PIXELS + 8]; /* deliberately not initialised */
+-
+- /* row-major order */
+- /* left edge, middle block, right edge */
+- for ( ; height--; aligned_line += dst_stride, dst_line += dst_stride)
+- {
+- /* Uncached framebuffer access is really, really slow if we do it piecemeal.
+- * It should be much faster if we grab it all at once.
+- * One scanline should easily fit in L1 cache, so this should
+- * not waste RAM bandwidth.
+- */
+- neon_quadword_copy (scan_line, aligned_line, copy_count, copy_tail);
+-
+- /* Apply the actual filter */
+- plain_over_565_8_pix_neon (
+- src, scan_line + kernel_offset, 8 * sizeof(*dst_line), kernel_count);
+-
+- /* Copy the modified scanline back */
+- neon_quadword_copy (
+- dst_line, scan_line + copy_offset, width >> 3, (width & 7) * 2);
+- }
+- }
+-}
+-
+-static inline void
+-ARGB8_over_565_8_pix_neon (uint32_t *src,
+- uint16_t *dest,
+- uint32_t src_stride, /* bytes, not elements */
+- uint32_t count /* 8-pixel groups */)
+-{
+- asm volatile (
+- "0: @ loop\n"
+- " pld [%[src], %[src_stride]] @ preload from next scanline \n"
+- " vld1.16 {d0, d1}, [%[dest]] @ load pixels from framebuffer \n"
+- " vld4.8 {d20, d21, d22, d23},[%[src]]! @ load source image pixels \n"
+- " vsli.u16 q3, q0, #5 @ duplicate framebuffer blue bits \n"
+- " vshrn.u16 d2, q0, #8 @ unpack red from framebuffer pixels \n"
+- " vshrn.u16 d4, q0, #3 @ unpack green \n"
+- " vmvn d18, d23 @ we need the inverse alpha for the background \n"
+- " vsri.u8 d2, d2, #5 @ duplicate red bits (extend 5 to 8) \n"
+- " vshrn.u16 d6, q3, #2 @ unpack extended blue (truncate 10 to 8) \n"
+- " vsri.u8 d4, d4, #6 @ duplicate green bits (extend 6 to 8) \n"
+- " vmull.u8 q1, d2, d18 @ apply inverse alpha to background red... \n"
+- " vmull.u8 q2, d4, d18 @ ...green... \n"
+- " vmull.u8 q3, d6, d18 @ ...blue \n"
+- " subs %[count], %[count], #1 @ decrement/test loop counter \n"
+- " vmlal.u8 q1, d23, d22 @ add blended foreground red... \n"
+- " vmlal.u8 q2, d23, d21 @ ...green... \n"
+- " vmlal.u8 q3, d23, d20 @ ...blue \n"
+- " vsri.16 q1, q2, #5 @ pack green behind red \n"
+- " vsri.16 q1, q3, #11 @ pack blue into pixels \n"
+- " vst1.16 {d2, d3}, [%[dest]]! @ store composited pixels \n"
+- " bne 0b @ next please \n"
+-
+- /* Clobbered registers marked as input/outputs */
+- : [dest] "+r" (dest), [src] "+r" (src), [count] "+r" (count)
+-
+- /* Inputs */
+- : [src_stride] "r" (src_stride)
+-
+- /* Clobbers, including the inputs we modify, and potentially lots of memory */
+- : "d0", "d1", "d2", "d3", "d4", "d5", "d6", "d7", "d17", "d18", "d20",
+- "d21", "d22", "d23", "cc", "memory"
+- );
+-}
+-
+-static void
+-neon_composite_over_8888_0565 (pixman_implementation_t * impl,
+- pixman_op_t op,
+- pixman_image_t * src_image,
+- pixman_image_t * mask_image,
+- pixman_image_t * dst_image,
+- int32_t src_x,
+- int32_t src_y,
+- int32_t mask_x,
+- int32_t mask_y,
+- int32_t dest_x,
+- int32_t dest_y,
+- int32_t width,
+- int32_t height)
+-{
+- uint32_t *src_line;
+- uint16_t *dst_line, *aligned_line;
+- uint32_t dst_stride, src_stride;
+- uint32_t kernel_count, copy_count, copy_tail;
+- uint8_t kernel_offset, copy_offset;
+-
+- /* we assume mask is opaque
+- * so the only alpha to deal with is embedded in src
+- */
+- if (width > NEON_SCANLINE_BUFFER_PIXELS)
+- {
+- /* split the blit, so we can use a fixed-size scanline buffer */
+- int x;
+- for (x = 0; x < width; x += NEON_SCANLINE_BUFFER_PIXELS)
+- {
+- neon_composite_over_8888_0565 (
+- impl, op,
+- src_image, mask_image, dst_image,
+- src_x + x, src_y, mask_x + x, mask_y, dest_x + x, dest_y,
+- (x + NEON_SCANLINE_BUFFER_PIXELS > width) ? width - x : NEON_SCANLINE_BUFFER_PIXELS, height);
+- }
+- return;
+- }
+-
+- PIXMAN_IMAGE_GET_LINE (dst_image, dest_x, dest_y, uint16_t, dst_stride, dst_line, 1);
+- PIXMAN_IMAGE_GET_LINE (src_image, src_x, src_y, uint32_t, src_stride, src_line, 1);
+-
+- /* keep within minimum number of aligned quadwords on width
+- * while also keeping the minimum number of columns to process
+- */
+- {
+- unsigned long aligned_left = (unsigned long)(dst_line) & ~0xF;
+- unsigned long aligned_right = (((unsigned long)(dst_line + width)) + 0xF) & ~0xF;
+- unsigned long ceiling_length = (((unsigned long) width) * sizeof(*dst_line) + 0xF) & ~0xF;
+-
+- /* the fast copy should be quadword aligned */
+- copy_offset = dst_line - ((uint16_t*) aligned_left);
+- aligned_line = dst_line - copy_offset;
+- copy_count = (uint32_t) ((aligned_right - aligned_left) >> 4);
+- copy_tail = 0;
+-
+- if (aligned_right - aligned_left > ceiling_length)
+- {
+- /* unaligned routine is tightest */
+- kernel_count = (uint32_t) (ceiling_length >> 4);
+- kernel_offset = copy_offset;
+- }
+- else
+- {
+- /* aligned routine is equally tight, so it is safer to align */
+- kernel_count = copy_count;
+- kernel_offset = 0;
+- }
+-
+- /* We should avoid reading beyond scanline ends for safety */
+- if (aligned_line < (dst_line - dest_x) ||
+- (aligned_line + (copy_count * 16 / sizeof(*dst_line))) > ((dst_line - dest_x) + dst_image->bits.width))
+- {
+- /* switch to precise read */
+- copy_offset = kernel_offset = 0;
+- aligned_line = dst_line;
+- kernel_count = (uint32_t) (ceiling_length >> 4);
+- copy_count = (width * sizeof(*dst_line)) >> 4;
+- copy_tail = (width * sizeof(*dst_line)) & 0xF;
+- }
+- }
+-
+- /* Preload the first input scanline */
+- {
+- uint8_t *src_ptr = (uint8_t*) src_line;
+- uint32_t count = (width + 15) / 16;
+-
+-#ifdef USE_GCC_INLINE_ASM
+- asm volatile (
+- "0: @ loop \n"
+- " subs %[count], %[count], #1 \n"
+- " pld [%[src]] \n"
+- " add %[src], %[src], #64 \n"
+- " bgt 0b \n"
+-
+- /* Clobbered input registers marked as input/outputs */
+- : [src] "+r" (src_ptr), [count] "+r" (count)
+- : /* no unclobbered inputs */
+- : "cc"
+- );
+-#else
+- do
+- {
+- __pld (src_ptr);
+- src_ptr += 64;
+- }
+- while (--count);
+-#endif
+- }
+-
+- {
+- uint16_t scan_line[NEON_SCANLINE_BUFFER_PIXELS + 8]; /* deliberately not initialised */
+-
+- /* row-major order */
+- /* left edge, middle block, right edge */
+- for ( ; height--; src_line += src_stride, aligned_line += dst_stride)
+- {
+- /* Uncached framebuffer access is really, really slow if we do
+- * it piecemeal. It should be much faster if we grab it all at
+- * once. One scanline should easily fit in L1 cache, so this
+- * should not waste RAM bandwidth.
+- */
+- neon_quadword_copy (scan_line, aligned_line, copy_count, copy_tail);
+-
+- /* Apply the actual filter */
+- ARGB8_over_565_8_pix_neon (
+- src_line, scan_line + kernel_offset,
+- src_stride * sizeof(*src_line), kernel_count);
+-
+- /* Copy the modified scanline back */
+- neon_quadword_copy (dst_line,
+- scan_line + copy_offset,
+- width >> 3, (width & 7) * 2);
+- }
+- }
+-}
+-
+-#endif /* USE_GCC_INLINE_ASM */
+-
+ static const pixman_fast_path_t arm_neon_fast_path_array[] =
+ {
+ { PIXMAN_OP_ADD, PIXMAN_solid, PIXMAN_a8, PIXMAN_a8, neon_composite_add_8888_8_8, 0 },
+@@ -2612,12 +1908,6 @@ static const pixman_fast_path_t arm_neon_fast_path_array[] =
+ #ifdef USE_GCC_INLINE_ASM
+ { PIXMAN_OP_SRC, PIXMAN_r5g6b5, PIXMAN_null, PIXMAN_r5g6b5, neon_composite_src_16_16, 0 },
+ { PIXMAN_OP_SRC, PIXMAN_b5g6r5, PIXMAN_null, PIXMAN_b5g6r5, neon_composite_src_16_16, 0 },
+-#if 0 /* this code has some bugs */
+- { PIXMAN_OP_OVER, PIXMAN_solid, PIXMAN_null, PIXMAN_r5g6b5, neon_composite_over_n_0565, 0 },
+- { PIXMAN_OP_OVER, PIXMAN_solid, PIXMAN_null, PIXMAN_b5g6r5, neon_composite_over_n_0565, 0 },
+- { PIXMAN_OP_OVER, PIXMAN_a8r8g8b8, PIXMAN_null, PIXMAN_r5g6b5, neon_composite_over_8888_0565, 0 },
+- { PIXMAN_OP_OVER, PIXMAN_a8b8g8r8, PIXMAN_null, PIXMAN_b5g6r5, neon_composite_over_8888_0565, 0 },
+-#endif
+ #endif
+ { PIXMAN_OP_OVER, PIXMAN_a8r8g8b8, PIXMAN_null, PIXMAN_a8r8g8b8, neon_composite_over_8888_8888, 0 },
+ { PIXMAN_OP_OVER, PIXMAN_a8r8g8b8, PIXMAN_null, PIXMAN_x8r8g8b8, neon_composite_over_8888_8888, 0 },
+@@ -2668,79 +1958,6 @@ arm_neon_composite (pixman_implementation_t *imp,
+ }
+
+ static pixman_bool_t
+-pixman_blt_neon (void *src_bits,
+- void *dst_bits,
+- int src_stride,
+- int dst_stride,
+- int src_bpp,
+- int dst_bpp,
+- int src_x,
+- int src_y,
+- int dst_x,
+- int dst_y,
+- int width,
+- int height)
+-{
+- if (!width || !height)
+- return TRUE;
+-
+- /* accelerate only straight copies involving complete bytes */
+- if (src_bpp != dst_bpp || (src_bpp & 7))
+- return FALSE;
+-
+- {
+- uint32_t bytes_per_pixel = src_bpp >> 3;
+- uint32_t byte_width = width * bytes_per_pixel;
+- /* parameter is in words for some reason */
+- int32_t src_stride_bytes = src_stride * 4;
+- int32_t dst_stride_bytes = dst_stride * 4;
+- uint8_t *src_bytes = ((uint8_t*) src_bits) +
+- src_y * src_stride_bytes + src_x * bytes_per_pixel;
+- uint8_t *dst_bytes = ((uint8_t*) dst_bits) +
+- dst_y * dst_stride_bytes + dst_x * bytes_per_pixel;
+- uint32_t quadword_count = byte_width / 16;
+- uint32_t offset = byte_width % 16;
+-
+- while (height--)
+- {
+- neon_quadword_copy (dst_bytes, src_bytes, quadword_count, offset);
+- src_bytes += src_stride_bytes;
+- dst_bytes += dst_stride_bytes;
+- }
+- }
+-
+- return TRUE;
+-}
+-
+-static pixman_bool_t
+-arm_neon_blt (pixman_implementation_t *imp,
+- uint32_t * src_bits,
+- uint32_t * dst_bits,
+- int src_stride,
+- int dst_stride,
+- int src_bpp,
+- int dst_bpp,
+- int src_x,
+- int src_y,
+- int dst_x,
+- int dst_y,
+- int width,
+- int height)
+-{
+- if (pixman_blt_neon (
+- src_bits, dst_bits, src_stride, dst_stride, src_bpp, dst_bpp,
+- src_x, src_y, dst_x, dst_y, width, height))
+- {
+- return TRUE;
+- }
+-
+- return _pixman_implementation_blt (
+- imp->delegate,
+- src_bits, dst_bits, src_stride, dst_stride, src_bpp, dst_bpp,
+- src_x, src_y, dst_x, dst_y, width, height);
+-}
+-
+-static pixman_bool_t
+ arm_neon_fill (pixman_implementation_t *imp,
+ uint32_t * bits,
+ int stride,
+@@ -2765,9 +1982,6 @@ _pixman_implementation_create_arm_neon (void)
+ pixman_implementation_t *imp = _pixman_implementation_create (simd);
+
+ imp->composite = arm_neon_composite;
+-#if 0 /* this code has some bugs */
+- imp->blt = arm_neon_blt;
+-#endif
+ imp->fill = arm_neon_fill;
+
+ return imp;
diff --git a/recipes/xorg-lib/pixman_git.bb b/recipes/xorg-lib/pixman_git.bb
index 2ba06af67e..b8aa2a43c6 100644
--- a/recipes/xorg-lib/pixman_git.bb
+++ b/recipes/xorg-lib/pixman_git.bb
@@ -3,16 +3,20 @@ PRIORITY = "optional"
DESCRIPTION = "Low-level pixel manipulation library."
LICENSE = "X11"

-PV = "0.15.16"
-PR = "r1"
+PV = "0.17.1"
+PR = "r2"
PR_append = "+gitr${SRCREV}"

-SRCREV = "f9660ce29ed072c6cbaec711c5d18b9f0ba113ae"
+SRCREV = "7af985a69a9147e54dd5946a8062dbc2e534b735"

DEFAULT_PREFERENCE = "-1"
DEFAULT_PREFERENCE_angstrom = "1"

SRC_URI = "git://anongit.freedesktop.org/pixman;protocol=git \
+ file://pixman-28986.patch;patch=1 \
+ file://nearest-neighbour.patch;patch=1 \
+ file://remove-broken.patch;patch=1 \
+ file://over-8888-0565.patch;patch=1 \
"

S = "${WORKDIR}/git"
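
Editor's note: the fast paths disabled above under "#if 0 /* this code has some bugs */" all perform the OVER operator onto an r5g6b5 destination with a premultiplied a8r8g8b8 source — judging by its name, the same operation the newly added over-8888-0565.patch provides a rewritten fast path for. As a reference for what the vmull.u8/vmlal.u8 sequences compute eight pixels at a time, here is a minimal scalar C sketch. It is an illustration only, not part of any of these patches; the helper name over_8888_0565_scalar and the round-to-nearest division by 255 are assumptions (the NEON code instead keeps the high byte of the 16-bit product, approximating that division with a shift).

#include <stdint.h>

/* Hypothetical scalar reference: OVER-composite one scanline of
 * premultiplied a8r8g8b8 source onto an r5g6b5 destination.
 * Per channel: dest' = src + dest * (255 - alpha(src)) / 255.
 */
static void
over_8888_0565_scalar (const uint32_t *src, uint16_t *dst, int width)
{
    int i;

    for (i = 0; i < width; i++)
    {
        uint32_t s  = src[i];
        uint32_t ia = 255 - (s >> 24);       /* inverse source alpha */
        uint16_t d  = dst[i];

        /* unpack r5g6b5, replicating the high bits to extend the
         * 5/6-bit channels to 8 bits, as the vsri.u8 steps above do */
        uint32_t dr = (d >> 11) & 0x1f; dr = (dr << 3) | (dr >> 2);
        uint32_t dg = (d >>  5) & 0x3f; dg = (dg << 2) | (dg >> 4);
        uint32_t db =  d        & 0x1f; db = (db << 3) | (db >> 2);

        /* blend: the source is already premultiplied by its alpha */
        uint32_t r = ((s >> 16) & 0xff) + (dr * ia + 127) / 255;
        uint32_t g = ((s >>  8) & 0xff) + (dg * ia + 127) / 255;
        uint32_t b = ( s        & 0xff) + (db * ia + 127) / 255;

        /* defensive clamp; redundant for valid premultiplied input */
        if (r > 255) r = 255;
        if (g > 255) g = 255;
        if (b > 255) b = 255;

        /* pack back to r5g6b5 */
        dst[i] = (uint16_t) (((r & 0xf8) << 8) | ((g & 0xfc) << 3) | (b >> 3));
    }
}

The removed routines wrapped this arithmetic in 8-pixel NEON groups staged through an aligned scanline buffer, because, as their own comments note, piecemeal uncached framebuffer access is slow and a whole scanline fits in L1 cache; the scalar loop above trades that throughput for clarity.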