path: root/recipes/xorg-lib/pixman/0002-ARM-Introduction-of-the-new-framework-for-NEON-fast.patch
Diffstat (limited to 'recipes/xorg-lib/pixman/0002-ARM-Introduction-of-the-new-framework-for-NEON-fast.patch')
-rw-r--r--  recipes/xorg-lib/pixman/0002-ARM-Introduction-of-the-new-framework-for-NEON-fast.patch  1061
1 files changed, 1061 insertions, 0 deletions
diff --git a/recipes/xorg-lib/pixman/0002-ARM-Introduction-of-the-new-framework-for-NEON-fast.patch b/recipes/xorg-lib/pixman/0002-ARM-Introduction-of-the-new-framework-for-NEON-fast.patch
new file mode 100644
index 0000000000..af0a8aa7a0
--- /dev/null
+++ b/recipes/xorg-lib/pixman/0002-ARM-Introduction-of-the-new-framework-for-NEON-fast.patch
@@ -0,0 +1,1061 @@
+From d9d9173581331a3bf7e5d123db32025588b7f044 Mon Sep 17 00:00:00 2001
+From: Siarhei Siamashka <siarhei.siamashka@nokia.com>
+Date: Sat, 10 Oct 2009 00:20:51 +0300
+Subject: [PATCH 2/7] ARM: Introduction of the new framework for NEON fast path optimizations
+
+GNU assembler and its macro preprocessor are now used to generate
+NEON optimized functions from a common template. This automatically
+takes care of nuisances like ensuring optimal alignment, dealing with
+leading/trailing pixels, doing prefetch, etc.
+
+As the first use for this framework, this commit also includes an
+implementation of the pixman_composite_over_8888_0565_asm_neon function.
+---
+ configure.ac | 1 +
+ pixman/Makefile.am | 4 +-
+ pixman/pixman-arm-neon-asm.S | 309 +++++++++++++++++++++
+ pixman/pixman-arm-neon-asm.h | 620 ++++++++++++++++++++++++++++++++++++++++++
+ pixman/pixman-arm-neon.c | 55 ++++
+ 5 files changed, 988 insertions(+), 1 deletions(-)
+ create mode 100644 pixman/pixman-arm-neon-asm.S
+ create mode 100644 pixman/pixman-arm-neon-asm.h
+
+diff --git a/configure.ac b/configure.ac
+index c548174..522af15 100644
+--- a/configure.ac
++++ b/configure.ac
+@@ -71,6 +71,7 @@ AC_CANONICAL_HOST
+ test_CFLAGS=${CFLAGS+set} # We may override autoconf default CFLAGS.
+
+ AC_PROG_CC
++AM_PROG_AS
+ AC_PROG_LIBTOOL
+ AC_CHECK_FUNCS([getisax])
+ AC_C_BIGENDIAN
+diff --git a/pixman/Makefile.am b/pixman/Makefile.am
+index 6020623..2543c6a 100644
+--- a/pixman/Makefile.am
++++ b/pixman/Makefile.am
+@@ -109,7 +109,9 @@ endif
+ if USE_ARM_NEON
+ noinst_LTLIBRARIES += libpixman-arm-neon.la
+ libpixman_arm_neon_la_SOURCES = \
+- pixman-arm-neon.c
++ pixman-arm-neon.c \
++ pixman-arm-neon-asm.S \
++ pixman-arm-neon-asm.h
+ libpixman_arm_neon_la_CFLAGS = $(DEP_CFLAGS) $(ARM_NEON_CFLAGS)
+ libpixman_arm_neon_la_LIBADD = $(DEP_LIBS)
+ libpixman_1_la_LIBADD += libpixman-arm-neon.la
+diff --git a/pixman/pixman-arm-neon-asm.S b/pixman/pixman-arm-neon-asm.S
+new file mode 100644
+index 0000000..843899f
+--- /dev/null
++++ b/pixman/pixman-arm-neon-asm.S
+@@ -0,0 +1,309 @@
++/*
++ * Copyright © 2009 Nokia Corporation
++ *
++ * Permission to use, copy, modify, distribute, and sell this software and its
++ * documentation for any purpose is hereby granted without fee, provided that
++ * the above copyright notice appear in all copies and that both that
++ * copyright notice and this permission notice appear in supporting
++ * documentation, and that the name of Nokia Corporation not be used in
++ * advertising or publicity pertaining to distribution of the software without
++ * specific, written prior permission. Nokia Corporation makes no
++ * representations about the suitability of this software for any purpose.
++ * It is provided "as is" without express or implied warranty.
++ *
++ * THE COPYRIGHT HOLDERS DISCLAIM ALL WARRANTIES WITH REGARD TO THIS
++ * SOFTWARE, INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY AND
++ * FITNESS, IN NO EVENT SHALL THE COPYRIGHT HOLDERS BE LIABLE FOR ANY
++ * SPECIAL, INDIRECT OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
++ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN
++ * AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING
++ * OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS
++ * SOFTWARE.
++ *
++ * Author: Siarhei Siamashka (siarhei.siamashka@nokia.com)
++ */
++
++/* Prevent the stack from becoming executable for no reason... */
++#if defined(__linux__) && defined(__ELF__)
++.section .note.GNU-stack,"",%progbits
++#endif
++
++ .text
++ .fpu neon
++ .altmacro
++
++#include "pixman-arm-neon-asm.h"
++
++/*
++ * This file contains implementations of NEON optimized pixel processing
++ * functions functions. There is no full and detailed tutorial, but some
++ * functions (those which are exposing some new or interesting features)
++ * are extensively commented and can be used as examples.
++ *
++ * You may want to have a look at the following functions:
++ * - pixman_composite_over_8888_0565_asm_neon
++ */
++
++/*
++ * Implementation of pixman_composite_over_8888_0565_asm_neon
++ *
++ * This function takes a8r8g8b8 source buffer, r5g6b5 destination buffer and
++ * performs OVER compositing operation. Function fast_composite_over_8888_0565
++ * from pixman-fast-path.c does the same in C and can be used as a reference.
++ *
++ * First we need to have some NEON assembly code which can do the actual
++ * operation on the pixels and provide it to the template macro.
++ *
++ * The template macro conveniently takes care of all the necessary code for
++ * memory reading and writing (including quite tricky cases of handling
++ * unaligned leading/trailing pixels), so we only need to deal with the data
++ * in NEON registers.
++ *
++ * The recommended NEON register allocation, in general, is the following:
++ * d0, d1, d2, d3 - contain loaded source pixel data
++ * d4, d5, d6, d7 - contain loaded destination pixels (if they are needed)
++ * d24, d25, d26, d27 - contain loaded mask pixel data (if mask is used)
++ * d28, d29, d30, d31 - place for storing the result (destination pixels)
++ *
++ * As can be seen above, four 64-bit NEON registers are used for keeping
++ * intermediate pixel data and up to 8 pixels can be processed in one step
++ * for 32bpp formats (16 pixels for 16bpp, 32 pixels for 8bpp).
++ *
++ * This particular function uses the following allocation:
++ * d0, d1, d2, d3 - contain loaded source pixel data
++ * d4, d5 - contain loaded destination pixels (they are needed)
++ * d28, d29 - place for storing the result (destination pixels)
++ */
++
++/*
++ * Step one. We need to have some code to do some arithmetics on pixel data.
++ * This is implemented as a pair of macros: '*_head' and '*_tail'. When used
++ * back-to-back, they take pixel data from {d0, d1, d2, d3} and {d4, d5},
++ * perform all the needed calculations and write the result to {d28, d29}.
++ * The rationale for having two macros and not just one will be explained
++ * later. In practice, any single monolithic function which does the work can
++ * be split into two parts in any arbitrary way without affecting correctness.
++ *
++ * There is one special trick here too. The common template macro already makes
++ * our life a bit easier by deinterleaving the R, G, B, A color components
++ * for 32bpp pixel formats. This means that instead of having 8 packed
++ * pixels in the {d0, d1, d2, d3} registers, we actually use the d0 register
++ * for the blue channel (a vector of eight 8-bit values), d1 for green,
++ * d2 for red and d3 for alpha. There is no magic; this conversion
++ * can be done with a few NEON instructions.
++ *
++ * Packed to planar conversion:
++ * vuzp.8 d0, d1
++ * vuzp.8 d2, d3
++ * vuzp.8 d1, d3
++ * vuzp.8 d0, d2
++ *
++ * Planar to packed conversion:
++ * vzip.8 d0, d2
++ * vzip.8 d1, d3
++ * vzip.8 d2, d3
++ * vzip.8 d0, d1
++ *
++ * Pixels can be loaded directly in planar format using the VLD4.8 NEON
++ * instruction. But it is 1 cycle slower than VLD1.32, and sometimes the
++ * code can be scheduled so that the four extra VUZP.8 after VLD1.32 are
++ * dual-issued with other instructions, resulting in an overall
++ * 1 cycle improvement.
++ *
++ * But anyway, here is the code:
++ */
++.macro pixman_composite_over_8888_0565_process_pixblock_head
++ /* convert 8 r5g6b5 pixel data from {d4, d5} to planar 8-bit format
++ and put data into d6 - red, d7 - green, d30 - blue */
++ vshrn.u16 d6, q2, #8
++ vshrn.u16 d7, q2, #3
++ vsli.u16 q2, q2, #5
++ vsri.u8 d6, d6, #5
++ vmvn.8 d3, d3 /* invert source alpha */
++ vsri.u8 d7, d7, #6
++ vshrn.u16 d30, q2, #2
++ /* now do alpha blending, storing results in 8-bit planar format
++ into d16 - red, d19 - green, d18 - blue */
++ vmull.u8 q10, d3, d6
++ vmull.u8 q11, d3, d7
++ vmull.u8 q12, d3, d30
++ vrshr.u16 q13, q10, #8
++ vrshr.u16 q3, q11, #8
++ vrshr.u16 q15, q12, #8
++ vraddhn.u16 d20, q10, q13
++ vraddhn.u16 d23, q11, q3
++ vraddhn.u16 d22, q12, q15
++.endm
++
++.macro pixman_composite_over_8888_0565_process_pixblock_tail
++ /* ... continue alpha blending */
++ vqadd.u8 d16, d2, d20
++ vqadd.u8 q9, q0, q11
++ /* convert the result to r5g6b5 and store it into {d28, d29} */
++ vshll.u8 q14, d16, #8
++ vshll.u8 q8, d19, #8
++ vshll.u8 q9, d18, #8
++ vsri.u16 q14, q8, #5
++ vsri.u16 q14, q9, #11
++.endm
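++
++/*
++ * For reference, here is a rough scalar C sketch of what one pixel goes
++ * through in the head/tail pair above (the helper below is made up for
++ * illustration and is not part of pixman; the real C reference is
++ * fast_composite_over_8888_0565 in pixman-fast-path.c). The NEON code uses
++ * a cheaper rounded approximation of the division by 255 (VRSHR + VRADDHN)
++ * and saturating adds (VQADD), so the result may differ in the last bit.
++ *
++ *   static uint16_t over_8888_0565_pixel (uint32_t s, uint16_t d)
++ *   {
++ *       uint8_t sa = s >> 24, sr = s >> 16, sg = s >> 8, sb = s;
++ *       // expand the r5g6b5 destination to 8 bits per channel
++ *       uint8_t dr = ((d >> 11) << 3) | ((d >> 13) & 7);
++ *       uint8_t dg = (((d >> 5) & 0x3f) << 2) | ((d >> 9) & 3);
++ *       uint8_t db = ((d & 0x1f) << 3) | ((d >> 2) & 7);
++ *       // OVER with premultiplied source: out = src + (255 - alpha) * dst / 255
++ *       uint8_t r = sr + ((255 - sa) * dr + 127) / 255;
++ *       uint8_t g = sg + ((255 - sa) * dg + 127) / 255;
++ *       uint8_t b = sb + ((255 - sa) * db + 127) / 255;
++ *       // repack to r5g6b5
++ *       return ((r & 0xf8) << 8) | ((g & 0xfc) << 3) | (b >> 3);
++ *   }
++ */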
++
++/*
++ * OK, now we got almost everything that we need. Using the above two
++ * macros, the work can be done right. But now we want to optimize
++ * it a bit. ARM Cortex-A8 is an in-order core, and benefits really
++ * a lot from good code scheduling and software pipelining.
++ *
++ * Let's construct some code which will run in the core main loop.
++ * Pseudo-code of the main loop looks like this:
++ * head
++ * while (...) {
++ * tail
++ * head
++ * }
++ * tail
++ *
++ * It may look a bit weird, but this setup allows us to hide instruction
++ * latencies better and to utilize the dual-issue capability more
++ * efficiently.
++ *
++ * So what we need now is a '*_tail_head' macro, which will be used
++ * in the core main loop. A trivial straightforward implementation
++ * of this macro would look like this:
++ *
++ * pixman_composite_over_8888_0565_process_pixblock_tail
++ * vst1.16 {d28, d29}, [DST_W, :128]!
++ * vld1.16 {d4, d5}, [DST_R, :128]!
++ * vld4.32 {d0, d1, d2, d3}, [SRC]!
++ * pixman_composite_over_8888_0565_process_pixblock_head
++ * cache_preload 8, 8
++ *
++ * Now it also contains some VLD/VST instructions. We simply can't move from
++ * processing one block of pixels to the next one with arithmetic alone.
++ * The previously processed data needs to be written to memory and new
++ * data needs to be fetched. Fortunately, this main loop does not deal
++ * with partial leading/trailing pixels and can load/store full blocks
++ * of pixels in bulk. Additionally, the destination buffer is 16-byte
++ * aligned here (which is good for performance).
++ *
++ * The new things here are the DST_R, DST_W, SRC and MASK identifiers. These
++ * are aliases for the ARM registers which are used as pointers for
++ * accessing data. We maintain separate pointers for reading from and
++ * writing to the destination buffer.
++ *
++ * Another new thing is the 'cache_preload' macro. It is used to prefetch
++ * data into the CPU cache and improve performance when dealing with
++ * images which are far larger than the cache size. It takes one argument
++ * (actually two, but they need to be the same here) - the number of pixels
++ * in a block. Looking into 'pixman-arm-neon-asm.h' provides more
++ * details about this macro. Moreover, if good performance is needed,
++ * the code from this macro should be copied into the '*_tail_head' macro
++ * and mixed with the rest of the code for optimal instruction scheduling.
++ * That is actually what we do below.
++ *
++ * Now, after all the explanations, here is the optimized code.
++ * Different instruction streams (originating from the '*_head', '*_tail'
++ * and 'cache_preload' macros) use different indentation levels for
++ * better readability. Actually, taking the code from one of these
++ * indentation levels and ignoring a few VLD/VST instructions would
++ * result in exactly the code from the '*_head', '*_tail' or 'cache_preload'
++ * macro!
++ */
++
++#if 1
++
++.macro pixman_composite_over_8888_0565_process_pixblock_tail_head
++ vqadd.u8 d16, d2, d20
++ vld1.16 {d4, d5}, [DST_R, :128]!
++ vqadd.u8 q9, q0, q11
++ vshrn.u16 d6, q2, #8
++ vld4.8 {d0, d1, d2, d3}, [SRC]!
++ vshrn.u16 d7, q2, #3
++ vsli.u16 q2, q2, #5
++ vshll.u8 q14, d16, #8
++ add PF_X, PF_X, #8
++ vshll.u8 q8, d19, #8
++ tst PF_CTL, #0xF
++ vsri.u8 d6, d6, #5
++ addne PF_X, PF_X, #8
++ vmvn.8 d3, d3
++ subne PF_CTL, PF_CTL, #1
++ vsri.u8 d7, d7, #6
++ vshrn.u16 d30, q2, #2
++ vmull.u8 q10, d3, d6
++ pld [PF_SRC, PF_X, lsl #src_bpp_shift]
++ vmull.u8 q11, d3, d7
++ vmull.u8 q12, d3, d30
++ pld [PF_DST, PF_X, lsl #dst_bpp_shift]
++ vsri.u16 q14, q8, #5
++ cmp PF_X, ORIG_W
++ vshll.u8 q9, d18, #8
++ vrshr.u16 q13, q10, #8
++ subge PF_X, PF_X, ORIG_W
++ vrshr.u16 q3, q11, #8
++ vrshr.u16 q15, q12, #8
++ subges PF_CTL, PF_CTL, #0x10
++ vsri.u16 q14, q9, #11
++ ldrgeb DUMMY, [PF_SRC, SRC_STRIDE, lsl #src_bpp_shift]!
++ vraddhn.u16 d20, q10, q13
++ vraddhn.u16 d23, q11, q3
++ ldrgeb DUMMY, [PF_DST, DST_STRIDE, lsl #dst_bpp_shift]!
++ vraddhn.u16 d22, q12, q15
++ vst1.16 {d28, d29}, [DST_W, :128]!
++.endm
++
++#else
++
++/* If we did not care much about the performance, we would just use this... */
++.macro pixman_composite_over_8888_0565_process_pixblock_tail_head
++ pixman_composite_over_8888_0565_process_pixblock_tail
++ vst1.16 {d28, d29}, [DST_W, :128]!
++ vld1.16 {d4, d5}, [DST_R, :128]!
++ vld4.32 {d0, d1, d2, d3}, [SRC]!
++ pixman_composite_over_8888_0565_process_pixblock_head
++ cache_preload 8, 8
++.endm
++
++#endif
++
++/*
++ * And now the final part. We are using 'generate_composite_function' macro
++ * to put all the stuff together. We are specifying the name of the function
++ * which we want to get, number of bits per pixel for the source, mask and
++ * destination (0 if unused, like mask in this case). Next come some bit
++ * flags:
++ * FLAG_DST_READWRITE - tells that the destination buffer is both read
++ * and written; for a write-only buffer we would use the
++ * FLAG_DST_WRITEONLY flag instead
++ * FLAG_DEINTERLEAVE_32BPP - tells that we prefer to work with planar data
++ * and separate color channels for 32bpp formats.
++ * The next things are:
++ * - the number of pixels processed per iteration (8 in this case, because
++ * that's the maximum that can fit into four 64-bit NEON registers).
++ * - prefetch distance, measured in pixel blocks. In this case it is 5 blocks
++ * of 8 pixels each. That would be 40 pixels, or up to 160 bytes. The optimal
++ * prefetch distance can be selected by running some benchmarks.
++ *
++ * After that we specify some macros: 'default_init' and
++ * 'default_cleanup' (it is possible to have a custom init/cleanup to be
++ * able to save/restore some extra NEON registers like d8-d15 or do
++ * anything else), followed by
++ * 'pixman_composite_over_8888_0565_process_pixblock_head',
++ * 'pixman_composite_over_8888_0565_process_pixblock_tail' and
++ * 'pixman_composite_over_8888_0565_process_pixblock_tail_head'
++ * which we implemented above.
++ *
++ * The last part is the NEON register allocation scheme.
++ */
++generate_composite_function \
++ pixman_composite_over_8888_0565_asm_neon, 32, 0, 16, \
++ FLAG_DST_READWRITE | FLAG_DEINTERLEAVE_32BPP, \
++ 8, /* number of pixels, processed in a single block */ \
++ 5, /* prefetch distance */ \
++ default_init, \
++ default_cleanup, \
++ pixman_composite_over_8888_0565_process_pixblock_head, \
++ pixman_composite_over_8888_0565_process_pixblock_tail, \
++ pixman_composite_over_8888_0565_process_pixblock_tail_head, \
++ 28, /* dst_w_basereg */ \
++ 4, /* dst_r_basereg */ \
++ 0, /* src_basereg */ \
++ 24 /* mask_basereg */
+diff --git a/pixman/pixman-arm-neon-asm.h b/pixman/pixman-arm-neon-asm.h
+new file mode 100644
+index 0000000..d276ab9
+--- /dev/null
++++ b/pixman/pixman-arm-neon-asm.h
+@@ -0,0 +1,620 @@
++/*
++ * Copyright © 2009 Nokia Corporation
++ *
++ * Permission to use, copy, modify, distribute, and sell this software and its
++ * documentation for any purpose is hereby granted without fee, provided that
++ * the above copyright notice appear in all copies and that both that
++ * copyright notice and this permission notice appear in supporting
++ * documentation, and that the name of Nokia Corporation not be used in
++ * advertising or publicity pertaining to distribution of the software without
++ * specific, written prior permission. Nokia Corporation makes no
++ * representations about the suitability of this software for any purpose.
++ * It is provided "as is" without express or implied warranty.
++ *
++ * THE COPYRIGHT HOLDERS DISCLAIM ALL WARRANTIES WITH REGARD TO THIS
++ * SOFTWARE, INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY AND
++ * FITNESS, IN NO EVENT SHALL THE COPYRIGHT HOLDERS BE LIABLE FOR ANY
++ * SPECIAL, INDIRECT OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
++ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN
++ * AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING
++ * OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS
++ * SOFTWARE.
++ *
++ * Author: Siarhei Siamashka (siarhei.siamashka@nokia.com)
++ */
++
++/*
++ * This file contains a macro ('generate_composite_function') which can
++ * construct 2D image processing functions, based on a common template.
++ * Any combination of source, destination and mask images with 8bpp,
++ * 16bpp or 32bpp color formats is supported.
++ *
++ * This macro takes care of:
++ * - handling of leading and trailing unaligned pixels
++ * - doing most of the work related to L2 cache preload
++ * - encouraging the use of software pipelining for better instruction
++ * scheduling
++ *
++ * The user of this macro has to provide some configuration parameters
++ * (bit depths for the images, prefetch distance, etc.) and a set of
++ * macros, which should implement basic code chunks responsible for
++ * pixels processing. See 'pixman-arm-neon-asm.S' file for the usage
++ * examples.
++ *
++ * TODO:
++ * - support for 24bpp formats
++ * - try overlapped pixel method (from Ian Rickards) when processing
++ * exactly two blocks of pixels
++ */
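++
++/*
++ * In short (see pixman-arm-neon-asm.S for the detailed explanation): the
++ * user provides a '*_head' macro which starts processing a block of pixels
++ * that has already been loaded into registers, a '*_tail' macro which
++ * finishes that processing, and a '*_tail_head' macro which interleaves the
++ * tail of one block with the loads, stores, prefetch and head of the next
++ * block, so that the main loop can be software pipelined.
++ */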
++
++.set FLAG_DST_WRITEONLY, 0
++.set FLAG_DST_READWRITE, 1
++.set FLAG_DEINTERLEAVE_32BPP, 2
++
++/*
++ * It is possible to set this to 0 and improve performance a bit if unaligned
++ * memory accesses are supported
++ */
++#define RESPECT_STRICT_ALIGNMENT 1
++
++/*
++ * Definitions of supplementary pixld/pixst macros (for partial load/store of
++ * pixel data)
++ */
++
++.macro pixldst1 op, elem_size, reg1, mem_operand, abits
++.if abits > 0
++ op&.&elem_size {d&reg1}, [&mem_operand&, :&abits&]!
++.else
++ op&.&elem_size {d&reg1}, [&mem_operand&]!
++.endif
++.endm
++
++.macro pixldst2 op, elem_size, reg1, reg2, mem_operand, abits
++.if abits > 0
++ op&.&elem_size {d&reg1, d&reg2}, [&mem_operand&, :&abits&]!
++.else
++ op&.&elem_size {d&reg1, d&reg2}, [&mem_operand&]!
++.endif
++.endm
++
++.macro pixldst4 op, elem_size, reg1, reg2, reg3, reg4, mem_operand, abits
++.if abits > 0
++ op&.&elem_size {d&reg1, d&reg2, d&reg3, d&reg4}, [&mem_operand&, :&abits&]!
++.else
++ op&.&elem_size {d&reg1, d&reg2, d&reg3, d&reg4}, [&mem_operand&]!
++.endif
++.endm
++
++.macro pixldst0 op, elem_size, reg1, idx, mem_operand, abits
++ op&.&elem_size {d&reg1[idx]}, [&mem_operand&]!
++.endm
++
++.macro pixldst numbytes, op, elem_size, basereg, mem_operand, abits
++.if numbytes == 32
++ pixldst4 op, elem_size, %(basereg+4), %(basereg+5), \
++ %(basereg+6), %(basereg+7), mem_operand, abits
++.elseif numbytes == 16
++ pixldst2 op, elem_size, %(basereg+2), %(basereg+3), mem_operand, abits
++.elseif numbytes == 8
++ pixldst1 op, elem_size, %(basereg+1), mem_operand, abits
++.elseif numbytes == 4
++ .if !RESPECT_STRICT_ALIGNMENT || (elem_size == 32)
++ pixldst0 op, 32, %(basereg+0), 1, mem_operand, abits
++ .elseif elem_size == 16
++ pixldst0 op, 16, %(basereg+0), 2, mem_operand, abits
++ pixldst0 op, 16, %(basereg+0), 3, mem_operand, abits
++ .else
++ pixldst0 op, 8, %(basereg+0), 4, mem_operand, abits
++ pixldst0 op, 8, %(basereg+0), 5, mem_operand, abits
++ pixldst0 op, 8, %(basereg+0), 6, mem_operand, abits
++ pixldst0 op, 8, %(basereg+0), 7, mem_operand, abits
++ .endif
++.elseif numbytes == 2
++ .if !RESPECT_STRICT_ALIGNMENT || (elem_size == 16)
++ pixldst0 op, 16, %(basereg+0), 1, mem_operand, abits
++ .else
++ pixldst0 op, 8, %(basereg+0), 2, mem_operand, abits
++ pixldst0 op, 8, %(basereg+0), 3, mem_operand, abits
++ .endif
++.elseif numbytes == 1
++ pixldst0 op, 8, %(basereg+0), 1, mem_operand, abits
++.else
++ .error "unsupported size: numbytes"
++.endif
++.endm
++
++.macro pixld numpix, bpp, basereg, mem_operand, abits=0
++.if bpp > 0
++.if (bpp == 32) && (numpix == 8) && (DEINTERLEAVE_32BPP_ENABLED != 0)
++ pixldst4 vld4, 8, %(basereg+4), %(basereg+5), \
++ %(basereg+6), %(basereg+7), mem_operand, abits
++.else
++ pixldst %(numpix * bpp / 8), vld1, %(bpp), basereg, mem_operand, abits
++.endif
++.endif
++.endm
++
++.macro pixst numpix, bpp, basereg, mem_operand, abits=0
++.if bpp > 0
++.if (bpp == 32) && (numpix == 8) && (DEINTERLEAVE_32BPP_ENABLED != 0)
++ pixldst4 vst4, 8, %(basereg+4), %(basereg+5), \
++ %(basereg+6), %(basereg+7), mem_operand, abits
++.else
++ pixldst %(numpix * bpp / 8), vst1, %(bpp), basereg, mem_operand, abits
++.endif
++.endif
++.endm
++
++.macro pixld_a numpix, bpp, basereg, mem_operand
++.if (bpp * numpix) <= 128
++ pixld numpix, bpp, basereg, mem_operand, %(bpp * numpix)
++.else
++ pixld numpix, bpp, basereg, mem_operand, 128
++.endif
++.endm
++
++.macro pixst_a numpix, bpp, basereg, mem_operand
++.if (bpp * numpix) <= 128
++ pixst numpix, bpp, basereg, mem_operand, %(bpp * numpix)
++.else
++ pixst numpix, bpp, basereg, mem_operand, 128
++.endif
++.endm
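++
++/*
++ * A worked example (for illustration only): with the parameters used later
++ * by pixman_composite_over_8888_0565_asm_neon (pixblock_size = 8,
++ * src_bpp = 32, src_basereg = 0, 32bpp deinterleaving enabled), the
++ * main-loop source load
++ *
++ *   pixld pixblock_size, src_bpp, \
++ *         (src_basereg - pixblock_size * src_bpp / 64), SRC
++ *
++ * expands to
++ *
++ *   vld4.8 {d0, d1, d2, d3}, [SRC]!
++ *
++ * which is exactly the instruction written by hand in the optimized
++ * '*_tail_head' macro of that function.
++ */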
++
++.macro vuzp8 reg1, reg2
++ vuzp.8 d&reg1, d&reg2
++.endm
++
++.macro vzip8 reg1, reg2
++ vzip.8 d&reg1, d&reg2
++.endm
++
++/* deinterleave B, G, R, A channels for eight 32bpp pixels in 4 registers */
++.macro pixdeinterleave bpp, basereg
++.if (bpp == 32) && (DEINTERLEAVE_32BPP_ENABLED != 0)
++ vuzp8 %(basereg+0), %(basereg+1)
++ vuzp8 %(basereg+2), %(basereg+3)
++ vuzp8 %(basereg+1), %(basereg+3)
++ vuzp8 %(basereg+0), %(basereg+2)
++.endif
++.endm
++
++/* interleave B, G, R, A channels for eight 32bpp pixels in 4 registers */
++.macro pixinterleave bpp, basereg
++.if (bpp == 32) && (DEINTERLEAVE_32BPP_ENABLED != 0)
++ vzip8 %(basereg+0), %(basereg+2)
++ vzip8 %(basereg+1), %(basereg+3)
++ vzip8 %(basereg+2), %(basereg+3)
++ vzip8 %(basereg+0), %(basereg+1)
++.endif
++.endm
++
++/*
++ * This is a macro for implementing cache preload. The main idea is that
++ * the cache preload logic is mostly independent of the rest of the pixel
++ * processing code. It starts at the top left pixel and moves forward
++ * across pixels and can jump across lines. The prefetch distance is handled
++ * in an 'incremental' way: it starts from 0 and advances to the optimal
++ * distance over time. After reaching optimal prefetch distance, it is
++ * kept constant. There are some checks which prevent prefetching
++ * unneeded pixel lines below the image (but it still prefetches a bit
++ * more data on the right side of the image - not a big issue and it may
++ * actually be helpful when rendering text glyphs). An additional trick is
++ * the use of an LDR instruction for prefetch instead of PLD when moving to
++ * the next line; the point is that we have a high chance of getting a TLB
++ * miss in this case, and PLD would be useless.
++ *
++ * This sounds like it may introduce a noticeable overhead (when working with
++ * fully cached data). But in reality, due to having a separate pipeline and
++ * instruction queue for the NEON unit in ARM Cortex-A8, normal ARM code can
++ * execute simultaneously with NEON and be completely shadowed by it. Thus
++ * we get no performance overhead at all (*). This looks like a very nice
++ * feature of Cortex-A8 if used wisely. We don't have a hardware
++ * prefetcher, but we can still implement some rather advanced prefetch logic
++ * in software for almost zero cost!
++ *
++ * (*) The overhead of the prefetcher is visible when running some trivial
++ * pixel processing like a simple copy. Anyway, having prefetch is a must
++ * when working with graphics data.
++ */
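++/*
++ * Roughly, the registers involved are: PF_X - the current prefetch position
++ * (in pixels) within the scanline; PF_SRC/PF_DST/PF_MASK - the prefetch
++ * pointers for the corresponding buffers; PF_CTL - two packed counters,
++ * where the low 4 bits count how many more blocks receive an extra
++ * 'boost_increment' so that the prefetch distance gradually grows to
++ * 'prefetch_distance' blocks, and the upper bits count the remaining
++ * scanlines so that no prefetching is done past the last line.
++ */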
++.macro cache_preload std_increment, boost_increment
++.if (src_bpp_shift >= 0) || (dst_r_bpp != 0) || (mask_bpp_shift >= 0)
++.if regs_shortage
++ ldr ORIG_W, [sp] /* If we are short on regs, ORIG_W is kept on stack */
++.endif
++.if std_increment != 0
++ add PF_X, PF_X, #std_increment
++.endif
++ tst PF_CTL, #0xF
++ addne PF_X, PF_X, #boost_increment
++ subne PF_CTL, PF_CTL, #1
++ cmp PF_X, ORIG_W
++.if src_bpp_shift >= 0
++ pld [PF_SRC, PF_X, lsl #src_bpp_shift]
++.endif
++.if dst_r_bpp != 0
++ pld [PF_DST, PF_X, lsl #dst_bpp_shift]
++.endif
++.if mask_bpp_shift >= 0
++ pld [PF_MASK, PF_X, lsl #mask_bpp_shift]
++.endif
++ subge PF_X, PF_X, ORIG_W
++ subges PF_CTL, PF_CTL, #0x10
++.if src_bpp_shift >= 0
++ ldrgeb DUMMY, [PF_SRC, SRC_STRIDE, lsl #src_bpp_shift]!
++.endif
++.if dst_r_bpp != 0
++ ldrgeb DUMMY, [PF_DST, DST_STRIDE, lsl #dst_bpp_shift]!
++.endif
++.if mask_bpp_shift >= 0
++ ldrgeb DUMMY, [PF_MASK, MASK_STRIDE, lsl #mask_bpp_shift]!
++.endif
++.endif
++.endm
++
++/*
++ * Registers are allocated in the following way by default:
++ * d0, d1, d2, d3 - reserved for loading source pixel data
++ * d4, d5, d6, d7 - reserved for loading destination pixel data
++ * d24, d25, d26, d27 - reserved for loading mask pixel data
++ * d28, d29, d30, d31 - final destination pixel data for writeback to memory
++ */
++.macro generate_composite_function fname, \
++ src_bpp, \
++ mask_bpp, \
++ dst_w_bpp, \
++ flags, \
++ pixblock_size, \
++ prefetch_distance, \
++ init, \
++ cleanup, \
++ process_pixblock_head, \
++ process_pixblock_tail, \
++ process_pixblock_tail_head, \
++ dst_w_basereg = 28, \
++ dst_r_basereg = 4, \
++ src_basereg = 0, \
++ mask_basereg = 24
++
++ .global fname
++fname:
++
++ W .req r0 /* width (is updated during processing) */
++ H .req r1 /* height (is updated during processing) */
++ DST_W .req r2 /* destination buffer pointer for writes */
++ DST_STRIDE .req r3 /* destination image stride */
++ SRC .req r4 /* source buffer pointer */
++ SRC_STRIDE .req r5 /* source image stride */
++ DST_R .req r6 /* destination buffer pointer for reads */
++
++ MASK .req r7 /* mask pointer */
++ MASK_STRIDE .req r8 /* mask stride */
++
++ PF_CTL .req r9
++ PF_X .req r10
++ PF_SRC .req r11
++ PF_DST .req r12
++ PF_MASK .req r14
++
++.if mask_bpp == 0
++ ORIG_W .req r7 /* saved original width */
++ DUMMY .req r8 /* temporary register */
++ .set regs_shortage, 0
++.elseif src_bpp == 0
++ ORIG_W .req r4 /* saved original width */
++ DUMMY .req r5 /* temporary register */
++ .set regs_shortage, 0
++.else
++ ORIG_W .req r1 /* saved original width */
++ DUMMY .req r1 /* temporary register */
++ .set regs_shortage, 1
++.endif
++
++ push {r4-r12, lr}
++
++ .set mask_bpp_shift, -1
++
++.if src_bpp == 32
++ .set src_bpp_shift, 2
++.elseif src_bpp == 16
++ .set src_bpp_shift, 1
++.elseif src_bpp == 8
++ .set src_bpp_shift, 0
++.elseif src_bpp == 0
++ .set src_bpp_shift, -1
++.else
++ .error "requested src bpp (src_bpp) is not supported"
++.endif
++.if mask_bpp == 32
++ .set mask_bpp_shift, 2
++.elseif mask_bpp == 8
++ .set mask_bpp_shift, 0
++.elseif mask_bpp == 0
++ .set mask_bpp_shift, -1
++.else
++ .error "requested mask bpp (mask_bpp) is not supported"
++.endif
++.if dst_w_bpp == 32
++ .set dst_bpp_shift, 2
++.elseif dst_w_bpp == 16
++ .set dst_bpp_shift, 1
++.elseif dst_w_bpp == 8
++ .set dst_bpp_shift, 0
++.else
++ .error "requested dst bpp (dst_w_bpp) is not supported"
++.endif
++
++.if (((flags) & FLAG_DST_READWRITE) != 0)
++ .set dst_r_bpp, dst_w_bpp
++.else
++ .set dst_r_bpp, 0
++.endif
++.if (((flags) & FLAG_DEINTERLEAVE_32BPP) != 0)
++ .set DEINTERLEAVE_32BPP_ENABLED, 1
++.else
++ .set DEINTERLEAVE_32BPP_ENABLED, 0
++.endif
++
++.if prefetch_distance < 0 || prefetch_distance > 15
++ .error "invalid prefetch distance (prefetch_distance)"
++.endif
++
++.if src_bpp > 0
++ ldr SRC, [sp, #40]
++.endif
++.if mask_bpp > 0
++ ldr MASK, [sp, #48]
++.endif
++ mov PF_X, #0
++.if src_bpp > 0
++ ldr SRC_STRIDE, [sp, #44]
++.endif
++.if mask_bpp > 0
++ ldr MASK_STRIDE, [sp, #52]
++.endif
++ mov DST_R, DST_W
++ mov PF_SRC, SRC
++ mov PF_DST, DST_R
++ mov PF_MASK, MASK
++ mov PF_CTL, H, lsl #4
++ /* pf_ctl = prefetch_distance | ((h - 1) << 4) */
++ add PF_CTL, #(prefetch_distance - 0x10)
++
++ init
++.if regs_shortage
++ push {r0, r1}
++.endif
++ subs H, H, #1
++.if regs_shortage
++ str H, [sp, #4] /* save updated height to stack */
++.else
++ mov ORIG_W, W
++.endif
++ blt 9f
++ cmp W, #(pixblock_size * 2)
++ blt 8f
++0:
++ /* ensure 16 byte alignment of the destination buffer */
++ tst DST_R, #0xF
++ beq 2f
++
++.irp lowbit, 1, 2, 4, 8, 16
++.if (dst_w_bpp <= (lowbit * 8)) && ((lowbit * 8) < (pixblock_size * dst_w_bpp))
++.if lowbit < 16 /* we don't need more than 16-byte alignment */
++ tst DST_R, #lowbit
++ beq 1f
++.endif
++ pixld (lowbit * 8 / dst_w_bpp), src_bpp, src_basereg, SRC
++ pixld (lowbit * 8 / dst_w_bpp), mask_bpp, mask_basereg, MASK
++.if dst_r_bpp > 0
++ pixld_a (lowbit * 8 / dst_r_bpp), dst_r_bpp, dst_r_basereg, DST_R
++.else
++ add DST_R, DST_R, #lowbit
++.endif
++ add PF_X, PF_X, #(lowbit * 8 / dst_w_bpp)
++ sub W, W, #(lowbit * 8 / dst_w_bpp)
++1:
++.endif
++.endr
++ pixdeinterleave src_bpp, src_basereg
++ pixdeinterleave mask_bpp, mask_basereg
++ pixdeinterleave dst_r_bpp, dst_r_basereg
++
++ process_pixblock_head
++ cache_preload 0, pixblock_size
++ process_pixblock_tail
++
++ pixinterleave dst_w_bpp, dst_w_basereg
++.irp lowbit, 1, 2, 4, 8, 16
++.if (dst_w_bpp <= (lowbit * 8)) && ((lowbit * 8) < (pixblock_size * dst_w_bpp))
++.if lowbit < 16 /* we don't need more than 16-byte alignment */
++ tst DST_W, #lowbit
++ beq 1f
++.endif
++ pixst_a (lowbit * 8 / dst_w_bpp), dst_w_bpp, dst_w_basereg, DST_W
++1:
++.endif
++.endr
++2:
++
++ pixld_a pixblock_size, dst_r_bpp, \
++ (dst_r_basereg - pixblock_size * dst_r_bpp / 64), DST_R
++ pixld pixblock_size, src_bpp, \
++ (src_basereg - pixblock_size * src_bpp / 64), SRC
++ pixld pixblock_size, mask_bpp, \
++ (mask_basereg - pixblock_size * mask_bpp / 64), MASK
++ add PF_X, PF_X, #pixblock_size
++ process_pixblock_head
++ cache_preload 0, pixblock_size
++ subs W, W, #(pixblock_size * 2)
++ blt 2f
++1: /* innermost pipelined loop */
++ process_pixblock_tail_head
++ subs W, W, #pixblock_size
++ bge 1b
++2:
++ process_pixblock_tail
++ pixst_a pixblock_size, dst_w_bpp, \
++ (dst_w_basereg - pixblock_size * dst_w_bpp / 64), DST_W
++
++ /* process up to (pixblock_size - 1) remaining pixels */
++ tst W, #(pixblock_size - 1)
++ beq 2f
++.irp chunk_size, 16, 8, 4, 2, 1
++.if pixblock_size > chunk_size
++ tst W, #chunk_size
++ beq 1f
++ pixld chunk_size, src_bpp, src_basereg, SRC
++ pixld chunk_size, mask_bpp, mask_basereg, MASK
++ pixld_a chunk_size, dst_r_bpp, dst_r_basereg, DST_R
++ add PF_X, PF_X, #chunk_size
++1:
++.endif
++.endr
++ pixdeinterleave src_bpp, src_basereg
++ pixdeinterleave mask_bpp, mask_basereg
++ pixdeinterleave dst_r_bpp, dst_r_basereg
++
++ process_pixblock_head
++ cache_preload 0, pixblock_size
++ process_pixblock_tail
++
++ pixinterleave dst_w_bpp, dst_w_basereg
++.irp chunk_size, 16, 8, 4, 2, 1
++.if pixblock_size > chunk_size
++ tst W, #chunk_size
++ beq 1f
++ pixst_a chunk_size, dst_w_bpp, dst_w_basereg, DST_W
++1:
++.endif
++.endr
++2:
++
++.if regs_shortage
++ ldrd W, [sp] /* load W and H (width and height) from stack */
++.else
++ mov W, ORIG_W
++.endif
++ add DST_W, DST_W, DST_STRIDE, lsl #dst_bpp_shift
++.if src_bpp != 0
++ add SRC, SRC, SRC_STRIDE, lsl #src_bpp_shift
++.endif
++.if mask_bpp != 0
++ add MASK, MASK, MASK_STRIDE, lsl #mask_bpp_shift
++.endif
++ sub DST_W, DST_W, W, lsl #dst_bpp_shift
++.if src_bpp != 0
++ sub SRC, SRC, W, lsl #src_bpp_shift
++.endif
++.if mask_bpp != 0
++ sub MASK, MASK, W, lsl #mask_bpp_shift
++.endif
++ subs H, H, #1
++ mov DST_R, DST_W
++.if regs_shortage
++ str H, [sp, #4] /* save updated height to stack */
++.endif
++ bge 0b
++.if regs_shortage
++ pop {r0, r1}
++.endif
++ cleanup
++ pop {r4-r12, pc} /* exit */
++
+8: /* handle a small rectangle, width up to (pixblock_size * 2 - 1) pixels */
++ tst W, #pixblock_size
++ beq 1f
++ pixld pixblock_size, dst_r_bpp, \
++ (dst_r_basereg - pixblock_size * dst_r_bpp / 64), DST_R
++ pixld pixblock_size, src_bpp, \
++ (src_basereg - pixblock_size * src_bpp / 64), SRC
++ pixld pixblock_size, mask_bpp, \
++ (mask_basereg - pixblock_size * mask_bpp / 64), MASK
++ process_pixblock_head
++ process_pixblock_tail
++ pixst pixblock_size, dst_w_bpp, \
++ (dst_w_basereg - pixblock_size * dst_w_bpp / 64), DST_W
++1: /* process the remaining pixels, which do not fully fill one block */
++ tst W, #(pixblock_size - 1)
++ beq 2f
++.irp chunk_size, 16, 8, 4, 2, 1
++.if pixblock_size > chunk_size
++ tst W, #chunk_size
++ beq 1f
++ pixld chunk_size, src_bpp, src_basereg, SRC
++ pixld chunk_size, mask_bpp, mask_basereg, MASK
++ pixld chunk_size, dst_r_bpp, dst_r_basereg, DST_R
++1:
++.endif
++.endr
++ pixdeinterleave src_bpp, src_basereg
++ pixdeinterleave mask_bpp, mask_basereg
++ pixdeinterleave dst_r_bpp, dst_r_basereg
++ process_pixblock_head
++ process_pixblock_tail
++ pixinterleave dst_w_bpp, dst_w_basereg
++.irp chunk_size, 16, 8, 4, 2, 1
++.if pixblock_size > chunk_size
++ tst W, #chunk_size
++ beq 1f
++ pixst chunk_size, dst_w_bpp, dst_w_basereg, DST_W
++1:
++.endif
++.endr
++2:
++.if regs_shortage
++ ldrd W, [sp] /* load W and H (width and height) from stack */
++.else
++ mov W, ORIG_W
++.endif
++ add DST_W, DST_W, DST_STRIDE, lsl #dst_bpp_shift
++.if src_bpp != 0
++ add SRC, SRC, SRC_STRIDE, lsl #src_bpp_shift
++.endif
++.if mask_bpp != 0
++ add MASK, MASK, MASK_STRIDE, lsl #mask_bpp_shift
++.endif
++ sub DST_W, DST_W, W, lsl #dst_bpp_shift
++.if src_bpp != 0
++ sub SRC, SRC, W, lsl #src_bpp_shift
++.endif
++.if mask_bpp != 0
++ sub MASK, MASK, W, lsl #mask_bpp_shift
++.endif
++ subs H, H, #1
++ mov DST_R, DST_W
++.if regs_shortage
++ str H, [sp, #4] /* save updated height to stack */
++.endif
++ bge 8b
++9:
++.if regs_shortage
++ pop {r0, r1}
++.endif
++ cleanup
++ pop {r4-r12, pc} /* exit */
++
++ .unreq SRC
++ .unreq MASK
++ .unreq DST_R
++ .unreq DST_W
++ .unreq ORIG_W
++ .unreq W
++ .unreq H
++ .unreq SRC_STRIDE
++ .unreq DST_STRIDE
++ .unreq MASK_STRIDE
++ .unreq PF_CTL
++ .unreq PF_X
++ .unreq PF_SRC
++ .unreq PF_DST
++ .unreq PF_MASK
++ .unreq DUMMY
++.endm
++
++.macro default_init
++.endm
++
++.macro default_cleanup
++.endm
+diff --git a/pixman/pixman-arm-neon.c b/pixman/pixman-arm-neon.c
+index 9caef61..fe57daa 100644
+--- a/pixman/pixman-arm-neon.c
++++ b/pixman/pixman-arm-neon.c
+@@ -1901,8 +1901,63 @@ pixman_fill_neon (uint32_t *bits,
+ #endif
+ }
+
++/*
++ * Use GNU assembler optimizations only if we are completely sure that
++ * the target system has a compatible ABI and calling conventions. This
++ * check can be updated/extended if more systems turn out to be actually
++ * compatible.
++ */
++#if defined(__linux__) && defined(__ARM_EABI__) && defined(USE_GCC_INLINE_ASM)
++#define USE_GNU_ASSEMBLER_ASM
++#endif
++
++#ifdef USE_GNU_ASSEMBLER_ASM
++
++void
++pixman_composite_over_8888_0565_asm_neon (int32_t w,
++ int32_t h,
++ uint16_t *dst,
++ int32_t dst_stride,
++ uint32_t *src,
++ int32_t src_stride);
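++
++/*
++ * Note: 'dst_stride' and 'src_stride' are passed in pixels (as produced by
++ * PIXMAN_IMAGE_GET_LINE below), not in bytes; the assembly scales them by
++ * the per-format bpp shift when advancing to the next scanline.
++ */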
++
++static void
++neon_composite_over_8888_0565 (pixman_implementation_t *imp,
++ pixman_op_t op,
++ pixman_image_t * src_image,
++ pixman_image_t * mask_image,
++ pixman_image_t * dst_image,
++ int32_t src_x,
++ int32_t src_y,
++ int32_t mask_x,
++ int32_t mask_y,
++ int32_t dest_x,
++ int32_t dest_y,
++ int32_t width,
++ int32_t height)
++{
++ uint16_t *dst_line;
++ uint32_t *src_line;
++ int32_t dst_stride, src_stride;
++
++ PIXMAN_IMAGE_GET_LINE (src_image, src_x, src_y, uint32_t,
++ src_stride, src_line, 1);
++ PIXMAN_IMAGE_GET_LINE (dst_image, dest_x, dest_y, uint16_t,
++ dst_stride, dst_line, 1);
++
++ pixman_composite_over_8888_0565_asm_neon (width, height,
++ dst_line, dst_stride,
++ src_line, src_stride);
++}
++
++#endif
++
+ static const pixman_fast_path_t arm_neon_fast_path_array[] =
+ {
++#ifdef USE_GNU_ASSEMBLER_ASM
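++ /*
++  * The a8b8g8r8 -> b5g6r5 entry reuses the same code: with red and blue
++  * swapped consistently in both the source and destination formats, the
++  * arithmetic of the OVER operation is unchanged.
++  */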
++ { PIXMAN_OP_OVER, PIXMAN_a8r8g8b8, PIXMAN_null, PIXMAN_r5g6b5, neon_composite_over_8888_0565, 0 },
++ { PIXMAN_OP_OVER, PIXMAN_a8b8g8r8, PIXMAN_null, PIXMAN_b5g6r5, neon_composite_over_8888_0565, 0 },
++#endif
+ { PIXMAN_OP_ADD, PIXMAN_solid, PIXMAN_a8, PIXMAN_a8, neon_composite_add_n_8_8, 0 },
+ { PIXMAN_OP_ADD, PIXMAN_a8, PIXMAN_null, PIXMAN_a8, neon_composite_add_8000_8000, 0 },
+ { PIXMAN_OP_OVER, PIXMAN_solid, PIXMAN_a8, PIXMAN_r5g6b5, neon_composite_over_n_8_0565, 0 },
+--
+1.6.2.4
+